diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index f5ab1b6..2e3d3eb 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -11,7 +11,8 @@ jobs: timeout-minutes: 10 strategy: matrix: - python-version: ['3.8', '3.9', '3.10'] + python-version: ['3.9', '3.10', '3.11', '3.12'] + numpy-version: ['numpy1', 'numpy2'] steps: - uses: actions/checkout@v3 @@ -23,5 +24,14 @@ jobs: run: | python -m pip install --upgrade pip pip install tox tox-gh-actions + - name: python version + env: + TOXENV: "py${{ matrix.python-version }}-${{ matrix.numpy-version }}" + run: | + TOXENV=${{ env.TOXENV }} + TOXENV=${TOXENV//.} # replace all dots + echo TOXENV=${TOXENV} >> $GITHUB_ENV # update GitHub ENV vars + - name: print env + run: echo ${{ env.TOXENV }} - name: Test with tox run: tox diff --git a/README.rst b/README.rst index c1573bc..a2fc7ac 100644 --- a/README.rst +++ b/README.rst @@ -300,6 +300,39 @@ Extending Mesh objects # Show the plot to the screen pyplot.show() +Creating a single triangle +---------------------------------- + +.. code-block:: python + + import numpy + from stl import mesh + + # A unit triangle + tri_vectors = [[0,0,0],[0,1,0],[0,0,1]] + + # Create the vector data. 
It’s a numpy structured array with N entries, where N is the number of triangles (here N=1), and each entry is in the format ('normals','vectors','attr') + data = numpy.array([( + 0, # Set 'normals' to zero, and the mesh class will automatically calculate them at initialization + tri_vectors, # 'vectors' + 0 # 'attr' + )], dtype = mesh.Mesh.dtype) # The structure defined by the mesh class (N x ('normals','vectors','attr')) + + # Create the mesh object from the structured array + tri_mesh = mesh.Mesh(data) + + # Optionally make a plot for fun + # Load the plot tools + from matplotlib import pyplot + from mpl_toolkits import mplot3d + + # Create a new plot + figure = pyplot.figure() + axes = figure.add_subplot(projection='3d') + + # Add mesh to plot + axes.add_collection3d(mplot3d.art3d.Poly3DCollection(tri_mesh.vectors)) # Just need the 'vectors' attribute for display + Creating Mesh objects from a list of vertices and faces ------------------------------------------------------------------------------ diff --git a/appveyor.yml b/appveyor.yml index 6827176..6586d95 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -28,7 +28,7 @@ install: build: false # Not a C# project, build stuff at the test step instead. before_test: - - py -m pip install tox numpy cython wheel + - py -m pip install tox numpy cython wheel setuptools test_script: - "py -m tox -e %TOXENV%" diff --git a/docs/_theme/flask_theme_support.py b/docs/_theme/flask_theme_support.py index 555c116..6915638 100644 --- a/docs/_theme/flask_theme_support.py +++ b/docs/_theme/flask_theme_support.py @@ -1,86 +1,89 @@ # flasky extensions. 
flasky pygments style based on tango style from pygments.style import Style -from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic, Whitespace, Punctuation, Other, Literal +from pygments.token import ( + Comment, + Error, + Generic, + Keyword, + Literal, + Name, + Number, + Operator, + Other, + Punctuation, + String, + Whitespace, +) class FlaskyStyle(Style): - background_color = "#f8f8f8" - default_style = "" + background_color = '#f8f8f8' + default_style = '' styles = { # No corresponding class for the following: # Text: "", # class: '' - Whitespace: "underline #f8f8f8", # class: 'w' - Error: "#a40000 border:#ef2929", # class: 'err' - Other: "#000000", # class 'x' - - Comment: "italic #8f5902", # class: 'c' - Comment.Preproc: "noitalic", # class: 'cp' - - Keyword: "bold #004461", # class: 'k' - Keyword.Constant: "bold #004461", # class: 'kc' - Keyword.Declaration: "bold #004461", # class: 'kd' - Keyword.Namespace: "bold #004461", # class: 'kn' - Keyword.Pseudo: "bold #004461", # class: 'kp' - Keyword.Reserved: "bold #004461", # class: 'kr' - Keyword.Type: "bold #004461", # class: 'kt' - - Operator: "#582800", # class: 'o' - Operator.Word: "bold #004461", # class: 'ow' - like keywords - - Punctuation: "bold #000000", # class: 'p' - + Whitespace: 'underline #f8f8f8', # class: 'w' + Error: '#a40000 border:#ef2929', # class: 'err' + Other: '#000000', # class 'x' + Comment: 'italic #8f5902', # class: 'c' + Comment.Preproc: 'noitalic', # class: 'cp' + Keyword: 'bold #004461', # class: 'k' + Keyword.Constant: 'bold #004461', # class: 'kc' + Keyword.Declaration: 'bold #004461', # class: 'kd' + Keyword.Namespace: 'bold #004461', # class: 'kn' + Keyword.Pseudo: 'bold #004461', # class: 'kp' + Keyword.Reserved: 'bold #004461', # class: 'kr' + Keyword.Type: 'bold #004461', # class: 'kt' + Operator: '#582800', # class: 'o' + Operator.Word: 'bold #004461', # class: 'ow' - like keywords + Punctuation: 'bold #000000', # class: 'p' # 
because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. - Name: "#000000", # class: 'n' - Name.Attribute: "#c4a000", # class: 'na' - to be revised - Name.Builtin: "#004461", # class: 'nb' - Name.Builtin.Pseudo: "#3465a4", # class: 'bp' - Name.Class: "#000000", # class: 'nc' - to be revised - Name.Constant: "#000000", # class: 'no' - to be revised - Name.Decorator: "#888", # class: 'nd' - to be revised - Name.Entity: "#ce5c00", # class: 'ni' - Name.Exception: "bold #cc0000", # class: 'ne' - Name.Function: "#000000", # class: 'nf' - Name.Property: "#000000", # class: 'py' - Name.Label: "#f57900", # class: 'nl' - Name.Namespace: "#000000", # class: 'nn' - to be revised - Name.Other: "#000000", # class: 'nx' - Name.Tag: "bold #004461", # class: 'nt' - like a keyword - Name.Variable: "#000000", # class: 'nv' - to be revised - Name.Variable.Class: "#000000", # class: 'vc' - to be revised - Name.Variable.Global: "#000000", # class: 'vg' - to be revised - Name.Variable.Instance: "#000000", # class: 'vi' - to be revised - - Number: "#990000", # class: 'm' - - Literal: "#000000", # class: 'l' - Literal.Date: "#000000", # class: 'ld' - - String: "#4e9a06", # class: 's' - String.Backtick: "#4e9a06", # class: 'sb' - String.Char: "#4e9a06", # class: 'sc' - String.Doc: "italic #8f5902", # class: 'sd' - like a comment - String.Double: "#4e9a06", # class: 's2' - String.Escape: "#4e9a06", # class: 'se' - String.Heredoc: "#4e9a06", # class: 'sh' - String.Interpol: "#4e9a06", # class: 'si' - String.Other: "#4e9a06", # class: 'sx' - String.Regex: "#4e9a06", # class: 'sr' - String.Single: "#4e9a06", # class: 's1' - String.Symbol: "#4e9a06", # class: 'ss' - - Generic: "#000000", # class: 'g' - Generic.Deleted: "#a40000", # class: 'gd' - Generic.Emph: "italic #000000", # class: 'ge' - Generic.Error: "#ef2929", # class: 'gr' - Generic.Heading: "bold #000080", # class: 'gh' - 
Generic.Inserted: "#00A000", # class: 'gi' - Generic.Output: "#888", # class: 'go' - Generic.Prompt: "#745334", # class: 'gp' - Generic.Strong: "bold #000000", # class: 'gs' - Generic.Subheading: "bold #800080", # class: 'gu' - Generic.Traceback: "bold #a40000", # class: 'gt' + Name: '#000000', # class: 'n' + Name.Attribute: '#c4a000', # class: 'na' - to be revised + Name.Builtin: '#004461', # class: 'nb' + Name.Builtin.Pseudo: '#3465a4', # class: 'bp' + Name.Class: '#000000', # class: 'nc' - to be revised + Name.Constant: '#000000', # class: 'no' - to be revised + Name.Decorator: '#888', # class: 'nd' - to be revised + Name.Entity: '#ce5c00', # class: 'ni' + Name.Exception: 'bold #cc0000', # class: 'ne' + Name.Function: '#000000', # class: 'nf' + Name.Property: '#000000', # class: 'py' + Name.Label: '#f57900', # class: 'nl' + Name.Namespace: '#000000', # class: 'nn' - to be revised + Name.Other: '#000000', # class: 'nx' + Name.Tag: 'bold #004461', # class: 'nt' - like a keyword + Name.Variable: '#000000', # class: 'nv' - to be revised + Name.Variable.Class: '#000000', # class: 'vc' - to be revised + Name.Variable.Global: '#000000', # class: 'vg' - to be revised + Name.Variable.Instance: '#000000', # class: 'vi' - to be revised + Number: '#990000', # class: 'm' + Literal: '#000000', # class: 'l' + Literal.Date: '#000000', # class: 'ld' + String: '#4e9a06', # class: 's' + String.Backtick: '#4e9a06', # class: 'sb' + String.Char: '#4e9a06', # class: 'sc' + String.Doc: 'italic #8f5902', # class: 'sd' - like a comment + String.Double: '#4e9a06', # class: 's2' + String.Escape: '#4e9a06', # class: 'se' + String.Heredoc: '#4e9a06', # class: 'sh' + String.Interpol: '#4e9a06', # class: 'si' + String.Other: '#4e9a06', # class: 'sx' + String.Regex: '#4e9a06', # class: 'sr' + String.Single: '#4e9a06', # class: 's1' + String.Symbol: '#4e9a06', # class: 'ss' + Generic: '#000000', # class: 'g' + Generic.Deleted: '#a40000', # class: 'gd' + Generic.Emph: 'italic #000000', # class: 
'ge' + Generic.Error: '#ef2929', # class: 'gr' + Generic.Heading: 'bold #000080', # class: 'gh' + Generic.Inserted: '#00A000', # class: 'gi' + Generic.Output: '#888', # class: 'go' + Generic.Prompt: '#745334', # class: 'gp' + Generic.Strong: 'bold #000000', # class: 'gs' + Generic.Subheading: 'bold #800080', # class: 'gu' + Generic.Traceback: 'bold #a40000', # class: 'gt' } diff --git a/docs/conf.py b/docs/conf.py index 2339c01..ab0af5f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Documentation build configuration file, created by # sphinx-quickstart on Thu Feb 27 20:00:23 2014. @@ -12,18 +11,18 @@ # All configuration values have a default; values that are commented out # serve to show the default. +import datetime import os import sys -import datetime try: - import numpy - assert numpy + import numpy as np + + assert np except ImportError: # From the readthedocs manual # http://read-the-docs.readthedocs.org/en/latest/faq.html?highlight=numpy - print >>sys.stderr, 'Unable to import numpy, falling back to mock' - import mock + from unittest import mock MOCK_MODULES = ['pygtk', 'gtk', 'gobject', 'argparse', 'numpy', 'pandas'] for mod_name in MOCK_MODULES: @@ -35,11 +34,10 @@ sys.path.insert(0, os.path.abspath('..')) from stl import __about__ as metadata - # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -62,17 +60,14 @@ source_suffix = '.rst' # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = metadata.__package_name__.replace('-', ' ').capitalize() -copyright = u'%s, %s' % ( - datetime.date.today().year, - metadata.__author__, -) +copyright = f'{datetime.date.today().year}, {metadata.__author__}' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -85,13 +80,13 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -99,27 +94,27 @@ # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# If true, keep warnings as 'system message' paragraphs in the built documents. 
+# keep_warnings = False # -- Options for HTML output ---------------------------------------------- @@ -131,77 +126,77 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None +# ' v documentation'. +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". +# so a file named 'default.css' will overwrite the builtin 'default.css'. # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
-#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# If true, 'Created using Sphinx' is shown in the HTML footer. Default is True. +# html_show_sphinx = True -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# If true, '(C) Copyright ...' is shown in the HTML footer. Default is True. +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# This is the file name suffix for HTML files (e.g. '.xhtml'). +# html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = metadata.__package_name__ + '-doc' @@ -210,61 +205,67 @@ # -- Options for LaTeX output --------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). 
-#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [( - 'index', - '%s.tex' % metadata.__package_name__, - u'%s Documentation' % metadata.__package_name__.replace('-', ' ').capitalize(), - metadata.__author__, - 'manual', -)] +# Grouping the document tree into LaTeX files. List of tuples (source start +# file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ( + 'index', + f'{metadata.__package_name__}.tex', + '{} Documentation'.format( + metadata.__package_name__.replace('-', ' ').capitalize() + ), + metadata.__author__, + 'manual', + ) +] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None -# For "manual" documents, if this is true, then toplevel headings are parts, +# For 'manual' documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [( - 'index', - metadata.__package_name__, - u'%s Documentation' % metadata.__package_name__.replace('-', ' ').capitalize(), - [metadata.__author__], - 1, -)] +man_pages = [ + ( + 'index', + metadata.__package_name__, + '{} Documentation'.format( + metadata.__package_name__.replace('-', ' ').capitalize() + ), + [metadata.__author__], + 1, + ) +] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -272,27 +273,31 @@ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) -texinfo_documents = [( - 'index', - metadata.__package_name__, - u'%s Documentation' % metadata.__package_name__.replace('-', ' ').capitalize(), - metadata.__author__, - metadata.__package_name__, - metadata.__description__, - 'Miscellaneous', -)] +texinfo_documents = [ + ( + 'index', + metadata.__package_name__, + '{} Documentation'.format( + metadata.__package_name__.replace('-', ' ').capitalize() + ), + metadata.__author__, + metadata.__package_name__, + metadata.__description__, + 'Miscellaneous', + ) +] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# If true, do not generate a @detailmenu in the 'Top' node's menu. +# texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- @@ -303,63 +308,62 @@ epub_publisher = metadata.__author__ epub_copyright = copyright -# The HTML theme for the epub output. 
Since the default themes are not optimized -# for small screen space, using the same theme for HTML and epub output is -# usually not wise. This defaults to 'epub', a theme designed to save visual -# space. -#epub_theme = 'epub' +# The HTML theme for the epub output. Since the default themes are not +# optimized for small screen space, using the same theme for HTML and epub +# output is usually not wise. This defaults to 'epub', a theme designed to +# save visual space. epub_theme = 'epub' # The language of the text. It defaults to the language option # or en if the language is not set. -#epub_language = '' +# epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. -#epub_scheme = '' +# epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. -#epub_identifier = '' +# epub_identifier = '' # A unique identification for the text. -#epub_uid = '' +# epub_uid = '' # A tuple containing the cover image and cover page html template filenames. -#epub_cover = () +# epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. -#epub_guide = () +# epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. -#epub_pre_files = [] +# epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. -#epub_post_files = [] +# epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # The depth of the table of contents in toc.ncx. -#epub_tocdepth = 3 +# epub_tocdepth = 3 # Allow duplicate toc entries. -#epub_tocdup = True +# epub_tocdup = True # Choose between 'default' and 'includehidden'. -#epub_tocscope = 'default' +# epub_tocscope = 'default' # Fix unsupported image types using the PIL. 
-#epub_fix_images = False +# epub_fix_images = False # Scale large images. -#epub_max_image_width = 0 +# epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. -#epub_show_urls = 'inline' +# epub_show_urls = 'inline' # If false, no index is generated. -#epub_use_index = True +# epub_use_index = True # Example configuration for intersphinx: refer to the Python standard library. diff --git a/docs/requirements.txt b/docs/requirements.txt index 007ab87..142b6ca 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1 @@ -mock -python-utils +-e .[docs] diff --git a/pytest.ini b/pytest.ini index 9a40476..ce91b88 100644 --- a/pytest.ini +++ b/pytest.ini @@ -14,7 +14,3 @@ addopts = --no-cov-on-fail --ignore=build --basetemp=tmp - -flake8-ignore = - *.py W391 W504 - docs/*.py ALL diff --git a/ruff.toml b/ruff.toml new file mode 100644 index 0000000..d4a2734 --- /dev/null +++ b/ruff.toml @@ -0,0 +1,111 @@ +# We keep the ruff configuration separate so it can easily be shared across +# all projects + +target-version = 'py39' + +src = ['stl'] +exclude = [ + '.tox', + # Ignore local test files/directories/old-stuff + 'test.py', + '*_old.py', +] + +line-length = 79 + +[lint] +ignore = [ + 'A001', # Variable {name} is shadowing a Python builtin + 'A002', # Argument {name} is shadowing a Python builtin + 'A003', # Class attribute {name} is shadowing a Python builtin + 'B023', # function-uses-loop-variable + 'B024', # `FormatWidgetMixin` is an abstract base class, but it has no abstract methods + 'D205', # blank-line-after-summary + 'D212', # multi-line-summary-first-line + 'RET505', # Unnecessary `else` after `return` statement + 'TRY003', # Avoid specifying long messages outside the exception class + 'RET507', # Unnecessary `elif` after `continue` statement + 'C405', # Unnecessary {obj_type} literal (rewrite as a set literal) + 'C406', # Unnecessary {obj_type} literal (rewrite as a dict literal) + 'C408', # Unnecessary {obj_type} 
call (rewrite as a literal) + 'SIM114', # Combine `if` branches using logical `or` operator + 'RET506', # Unnecessary `else` after `raise` statement + 'Q001', # Remove bad quotes + 'Q002', # Remove bad quotes + 'FA100', # Missing `from __future__ import annotations`, but uses `typing.Optional` + 'COM812', # Missing trailing comma in a list + 'ISC001', # String concatenation with implicit str conversion + 'SIM108', # Ternary operators are not always more readable + 'RUF100', # Unused noqa directives. Due to multiple Python versions, we need to keep them +] + +select = [ + 'A', # flake8-builtins + 'ASYNC', # flake8 async checker + 'B', # flake8-bugbear + 'C4', # flake8-comprehensions + 'C90', # mccabe + 'COM', # flake8-commas + + ## Require docstrings for all public methods, would be good to enable at some point + # 'D', # pydocstyle + + 'E', # pycodestyle error ('W' for warning) + 'F', # pyflakes + 'FA', # flake8-future-annotations + 'I', # isort + 'ICN', # flake8-import-conventions + 'INP', # flake8-no-pep420 + 'ISC', # flake8-implicit-str-concat + 'N', # pep8-naming + 'NPY', # NumPy-specific rules + 'PERF', # perflint, + 'PIE', # flake8-pie + 'Q', # flake8-quotes + + 'RET', # flake8-return + 'RUF', # Ruff-specific rules + 'SIM', # flake8-simplify + 'T20', # flake8-print + 'TD', # flake8-todos + 'TRY', # tryceratops + 'UP', # pyupgrade +] + +[lint.per-file-ignores] +'tests/*' = ['SIM115', 'SIM117', 'T201', 'B007'] +'docs/*' = ['INP001', 'RUF012'] + +[lint.pydocstyle] +convention = 'google' +ignore-decorators = [ + 'typing.overload', + 'typing.override', +] + +[lint.isort] +case-sensitive = true +combine-as-imports = true +force-wrap-aliases = true + +[lint.flake8-quotes] +docstring-quotes = 'single' +inline-quotes = 'single' +multiline-quotes = 'single' + +[format] +line-ending = 'lf' +indent-style = 'space' +quote-style = 'single' +docstring-code-format = true +skip-magic-trailing-comma = false +exclude = [ + '__init__.py', +] + +[lint.pycodestyle] 
+max-line-length = 79 + +[lint.flake8-pytest-style] +mark-parentheses = true diff --git a/setup.py b/setup.py index 6b44b05..7778889 100644 --- a/setup.py +++ b/setup.py @@ -1,43 +1,51 @@ import os import sys import warnings -from setuptools import setup, extension + +from setuptools import extension, setup from setuptools.command.build_ext import build_ext setup_kwargs = {} def error(*lines): - for line in lines: - print(line, file=sys.stderr) + for line in lines: + sys.stderr.write(f'{line}\n') try: from stl import stl + if not hasattr(stl, 'BaseStl'): - error('ERROR', - 'You have an incompatible stl package installed' - 'Please run "pip uninstall -y stl" first') + error( + 'ERROR', + 'You have an incompatible stl package installed. ' + 'Please run "pip uninstall -y stl" first', + ) sys.exit(1) except ImportError: pass try: - import numpy + import numpy as np from Cython import Build - setup_kwargs['ext_modules'] = Build.cythonize([ - extension.Extension( - 'stl._speedups', - ['stl/_speedups.pyx'], - include_dirs=[numpy.get_include()], - ), - ]) + setup_kwargs['ext_modules'] = Build.cythonize( + [ + extension.Extension( + 'stl._speedups', + ['stl/_speedups.pyx'], + include_dirs=[np.get_include()], + ), + ] + ) except ImportError: - error('WARNING', - 'Cython and Numpy is required for building extension.', - 'Falling back to pure Python implementation.') + error( + 'WARNING', + 'Cython and NumPy are required for building the extension.', + 'Falling back to pure Python implementation.', + ) # To prevent importing about and thereby breaking the coverage info we use this # exec hack @@ -50,8 +58,9 @@ def error(*lines): with open('README.rst') as fh: long_description = fh.read() else: - long_description = 'See http://pypi.python.org/pypi/%s/' % ( - about['__package_name__']) + long_description = 'See http://pypi.python.org/pypi/{}/'.format( + about['__package_name__'] + ) install_requires = [ 'numpy', @@ -63,21 +72,23 @@ def error(*lines): class BuildExt(build_ext): - def run(self): try: 
build_ext.run(self) except Exception as e: - warnings.warn(''' + warnings.warn( + f""" Unable to build speedups module, defaulting to pure Python. Note that the pure Python version is more than fast enough in most cases - %r - ''' % e) + {e!r} + """, + stacklevel=2, + ) if __name__ == '__main__': setup( - python_requires='>3.6.0', + python_requires='>3.9.0', name=about['__package_name__'], version=about['__version__'], author=about['__author__'], @@ -91,9 +102,11 @@ def run(self): tests_require=tests_require, entry_points={ 'console_scripts': [ - 'stl = %s.main:main' % about['__import_name__'], - 'stl2ascii = %s.main:to_ascii' % about['__import_name__'], - 'stl2bin = %s.main:to_binary' % about['__import_name__'], + 'stl = {}.main:main'.format(about['__import_name__']), + 'stl2ascii = {}.main:to_ascii'.format( + about['__import_name__'] + ), + 'stl2bin = {}.main:to_binary'.format(about['__import_name__']), ], }, classifiers=[ @@ -115,6 +128,30 @@ def run(self): cmdclass=dict( build_ext=BuildExt, ), - **setup_kwargs + extras_require={ + 'docs': [ + 'mock', + 'sphinx', + 'python-utils', + ], + 'tests': [ + 'cov-core', + 'coverage', + 'docutils', + 'execnet', + 'numpy', + 'cython', + 'pep8', + 'py', + 'pyflakes', + 'pytest', + 'pytest-cache', + 'pytest-cov', + 'python-utils', + 'Sphinx', + 'flake8', + 'wheel', + ], + }, + **setup_kwargs, ) - diff --git a/stl/__about__.py b/stl/__about__.py index 6467f12..c4a719d 100644 --- a/stl/__about__.py +++ b/stl/__about__.py @@ -1,11 +1,12 @@ __package_name__ = 'numpy-stl' __import_name__ = 'stl' -__version__ = '3.1.2' +__version__ = '3.2.0' __author__ = 'Rick van Hattem' __author_email__ = 'Wolph@Wol.ph' -__description__ = ' '.join(''' +__description__ = ' '.join( + """ Library to make reading, writing and modifying both binary and ascii STL files easy. 
-'''.split()) +""".split() +) __url__ = 'https://github.com/WoLpH/numpy-stl/' - diff --git a/stl/__init__.py b/stl/__init__.py index ac98687..4ca0e60 100644 --- a/stl/__init__.py +++ b/stl/__init__.py @@ -1,12 +1,6 @@ -from .stl import BUFFER_SIZE -from .stl import HEADER_SIZE -from .stl import COUNT_SIZE -from .stl import MAX_COUNT - -from .stl import Mode -from .base import Dimension -from .base import RemoveDuplicates +from .base import Dimension, RemoveDuplicates from .mesh import Mesh +from .stl import BUFFER_SIZE, COUNT_SIZE, HEADER_SIZE, MAX_COUNT, Mode __all__ = [ 'BUFFER_SIZE', diff --git a/stl/base.py b/stl/base.py index f3953ce..27098f2 100644 --- a/stl/base.py +++ b/stl/base.py @@ -3,7 +3,7 @@ import logging import math -import numpy +import numpy as np try: # pragma: no cover from collections import abc @@ -36,10 +36,11 @@ class Dimension(enum.IntEnum): class RemoveDuplicates(enum.Enum): - ''' + """ Choose whether to remove no duplicates, leave only a single of the duplicates or remove all duplicates (leaving holes). - ''' + """ + NONE = 0 SINGLE = 1 ALL = 2 @@ -61,7 +62,7 @@ def logged(class_): # systems while this works on OS X. Please let me know if you can tell me # what silly mistake I made here - logger_name = logger.Logged._Logged__get_name( + logger_name = logger.Logged._Logged__get_name( # type: ignore[reportAttributeAccessIssue] __name__, class_.__name__, ) @@ -75,9 +76,26 @@ def logged(class_): return class_ +def _get_or_update(key): + def _get(self): + attr = f'_{key}' + if not hasattr(self, attr): + getattr(self, f'update_{key}')() + return getattr(self, attr) + + return _get + + +def _set(key): + def __set(self, value): + setattr(self, f'_{key}', value) + + return __set + + @logged -class BaseMesh(logger.Logged, abc.Mapping): - ''' +class BaseMesh(logger.Logged, abc.Mapping): # type: ignore[reportGeneralTypeIssues] + """ Mesh object with easy access to the vectors through v0, v1 and v2. 
The normals, areas, min, max and units are calculated automatically. @@ -100,93 +118,105 @@ class BaseMesh(logger.Logged, abc.Mapping): :ivar numpy.array v1: Points in vector 1 (Nx3) :ivar numpy.array v2: Points in vector 2 (Nx3) - >>> data = numpy.zeros(10, dtype=BaseMesh.dtype) + >>> data = np.zeros(10, dtype=BaseMesh.dtype) >>> mesh = BaseMesh(data, remove_empty_areas=False) >>> # Increment vector 0 item 0 >>> mesh.v0[0] += 1 >>> mesh.v1[0] += 2 >>> # Check item 0 (contains v0, v1 and v2) - >>> assert numpy.array_equal( - ... mesh[0], - ... numpy.array([1., 1., 1., 2., 2., 2., 0., 0., 0.])) - >>> assert numpy.array_equal( - ... mesh.vectors[0], - ... numpy.array([[1., 1., 1.], - ... [2., 2., 2.], - ... [0., 0., 0.]])) - >>> assert numpy.array_equal( - ... mesh.v0[0], - ... numpy.array([1., 1., 1.])) - >>> assert numpy.array_equal( + >>> assert np.array_equal( + ... mesh[0], np.array([1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0]) + ... ) + >>> assert np.array_equal( + ... mesh.vectors[0], + ... np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [0.0, 0.0, 0.0]]), + ... ) + >>> assert np.array_equal(mesh.v0[0], np.array([1.0, 1.0, 1.0])) + >>> assert np.array_equal( ... mesh.points[0], - ... numpy.array([1., 1., 1., 2., 2., 2., 0., 0., 0.])) - >>> assert numpy.array_equal( + ... np.array([1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0]), + ... ) + >>> assert np.array_equal( ... mesh.data[0], - ... numpy.array(( - ... [0., 0., 0.], - ... [[1., 1., 1.], [2., 2., 2.], [0., 0., 0.]], - ... [0]), - ... dtype=BaseMesh.dtype)) - >>> assert numpy.array_equal(mesh.x[0], numpy.array([1., 2., 0.])) + ... np.array( + ... ( + ... [0.0, 0.0, 0.0], + ... [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [0.0, 0.0, 0.0]], + ... [0], + ... ), + ... dtype=BaseMesh.dtype, + ... ), + ... ) + >>> assert np.array_equal(mesh.x[0], np.array([1.0, 2.0, 0.0])) >>> mesh[0] = 3 - >>> assert numpy.array_equal( - ... mesh[0], - ... numpy.array([3., 3., 3., 3., 3., 3., 3., 3., 3.])) + >>> assert np.array_equal( + ... 
mesh[0], np.array([3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0]) + ... ) >>> len(mesh) == len(list(mesh)) True - >>> (mesh.min_ < mesh.max_).all() + >>> bool((mesh.min_ < mesh.max_).all()) True >>> mesh.update_normals() - >>> mesh.units.sum() + >>> float(mesh.units.sum()) 0.0 >>> mesh.v0[:] = mesh.v1[:] = mesh.v2[:] = 0 - >>> mesh.points.sum() + >>> float(mesh.points.sum()) 0.0 >>> mesh.v0 = mesh.v1 = mesh.v2 = 0 >>> mesh.x = mesh.y = mesh.z = 0 >>> mesh.attr = 1 - >>> (mesh.attr == 1).all() + >>> bool((mesh.attr == 1).all()) True >>> mesh.normals = 2 - >>> (mesh.normals == 2).all() + >>> bool((mesh.normals == 2).all()) True >>> mesh.vectors = 3 - >>> (mesh.vectors == 3).all() + >>> bool((mesh.vectors == 3).all()) True >>> mesh.points = 4 - >>> (mesh.points == 4).all() + >>> bool((mesh.points == 4).all()) True - ''' + """ + #: - normals: :func:`numpy.float32`, `(3, )` #: - vectors: :func:`numpy.float32`, `(3, 3)` #: - attr: :func:`numpy.uint16`, `(1, )` - dtype = numpy.dtype([ - ('normals', numpy.float32, (3,)), - ('vectors', numpy.float32, (3, 3)), - ('attr', numpy.uint16, (1,)), - ]) + dtype = np.dtype( + [ + ('normals', np.float32, (3,)), + ('vectors', np.float32, (3, 3)), + ('attr', np.uint16, (1,)), + ] + ) dtype = dtype.newbyteorder('<') # Even on big endian arches, use little e. 
- def __init__(self, data, calculate_normals=True, - remove_empty_areas=False, - remove_duplicate_polygons=RemoveDuplicates.NONE, - name='', speedups=True, **kwargs): - super(BaseMesh, self).__init__(**kwargs) + def __init__( + self, + data, + calculate_normals=True, + remove_empty_areas=False, + remove_duplicate_polygons=RemoveDuplicates.NONE, + name='', + speedups=True, + **kwargs, + ): + super().__init__(**kwargs) self.speedups = speedups if remove_empty_areas: data = self.remove_empty_areas(data) if RemoveDuplicates.map(remove_duplicate_polygons).value: - data = self.remove_duplicate_polygons(data, - remove_duplicate_polygons) + data = self.remove_duplicate_polygons( + data, remove_duplicate_polygons + ) self.name = name self.data = data @@ -252,53 +282,53 @@ def v2(self, value): @property def x(self): - return self.points[:, Dimension.X::3] + return self.points[:, Dimension.X :: 3] @x.setter def x(self, value): - self.points[:, Dimension.X::3] = value + self.points[:, Dimension.X :: 3] = value @property def y(self): - return self.points[:, Dimension.Y::3] + return self.points[:, Dimension.Y :: 3] @y.setter def y(self, value): - self.points[:, Dimension.Y::3] = value + self.points[:, Dimension.Y :: 3] = value @property def z(self): - return self.points[:, Dimension.Z::3] + return self.points[:, Dimension.Z :: 3] @z.setter def z(self, value): - self.points[:, Dimension.Z::3] = value + self.points[:, Dimension.Z :: 3] = value @classmethod def remove_duplicate_polygons(cls, data, value=RemoveDuplicates.SINGLE): value = RemoveDuplicates.map(value) polygons = data['vectors'].sum(axis=1) # Get a sorted list of indices - idx = numpy.lexsort(polygons.T) + idx = np.lexsort(polygons.T) # Get the indices of all different indices - diff = numpy.any(polygons[idx[1:]] != polygons[idx[:-1]], axis=1) + diff = np.any(polygons[idx[1:]] != polygons[idx[:-1]], axis=1) if value is RemoveDuplicates.SINGLE: # Only return the unique data, the True is so we always get at # least the 
originals - return data[numpy.sort(idx[numpy.concatenate(([True], diff))])] + return data[np.sort(idx[np.concatenate(([True], diff))])] elif value is RemoveDuplicates.ALL: # We need to return both items of the shifted diff - diff_a = numpy.concatenate(([True], diff)) - diff_b = numpy.concatenate((diff, [True])) - diff = numpy.concatenate((diff, [False])) + diff_a = np.concatenate(([True], diff)) + diff_b = np.concatenate((diff, [True])) + diff = np.concatenate((diff, [False])) # Combine both unique lists - filtered_data = data[numpy.sort(idx[diff_a & diff_b])] + filtered_data = data[np.sort(idx[diff_a & diff_b])] if len(filtered_data) <= len(data) / 2: - return data[numpy.sort(idx[diff_a])] + return data[np.sort(idx[diff_a])] else: - return data[numpy.sort(idx[diff])] + return data[np.sort(idx[diff])] else: return data @@ -308,13 +338,13 @@ def remove_empty_areas(cls, data): v0 = vectors[:, 0] v1 = vectors[:, 1] v2 = vectors[:, 2] - normals = numpy.cross(v1 - v0, v2 - v0) - squared_areas = (normals ** 2).sum(axis=1) - return data[squared_areas > AREA_SIZE_THRESHOLD ** 2] + normals = np.cross(v1 - v0, v2 - v0) + squared_areas = (normals**2).sum(axis=1) + return data[squared_areas > AREA_SIZE_THRESHOLD**2] def update_normals(self, update_areas=True, update_centroids=True): - '''Update the normals, areas, and centroids for all points''' - normals = numpy.cross(self.v1 - self.v0, self.v2 - self.v0) + """Update the normals, areas, and centroids for all points""" + normals = np.cross(self.v1 - self.v0, self.v2 - self.v0) if update_areas: self.update_areas(normals) @@ -326,7 +356,7 @@ def update_normals(self, update_areas=True, update_centroids=True): def get_unit_normals(self): normals = self.normals.copy() - normal = numpy.linalg.norm(normals, axis=1) + normal = np.linalg.norm(normals, axis=1) non_zero = normal > 0 if non_zero.any(): normals[non_zero] /= normal[non_zero][:, None] @@ -340,32 +370,31 @@ def update_max(self): def update_areas(self, normals=None): if 
normals is None: - normals = numpy.cross(self.v1 - self.v0, self.v2 - self.v0) + normals = np.cross(self.v1 - self.v0, self.v2 - self.v0) - areas = .5 * numpy.sqrt((normals ** 2).sum(axis=1)) + areas = 0.5 * np.sqrt((normals**2).sum(axis=1)) self.areas = areas.reshape((areas.size, 1)) def update_centroids(self): - self.centroids = numpy.mean([self.v0, self.v1, self.v2], axis=0) + self.centroids = np.mean([self.v0, self.v1, self.v2], axis=0) def check(self, exact=False): - '''Check the mesh is valid or not + """Check the mesh is valid or not :param bool exact: Perform exact checks. - ''' + """ return self.is_closed(exact=exact) def is_closed(self, exact=False): # pragma: no cover - '''Check the mesh is closed or not + """Check the mesh is closed or not :param bool exact: Perform a exact check on edges. - ''' + """ if exact: reversed_triangles = ( - numpy.cross(self.v1 - self.v0, - self.v2 - self.v0) * self.normals - ).sum(axis=1) < 0 + np.cross(self.v1 - self.v0, self.v2 - self.v0) * self.normals + ).sum(axis=1) < 0 directed_edges = { tuple(edge.ravel() if not rev else edge[::-1, :].ravel()) for rev, edge in zip( @@ -378,39 +407,40 @@ def is_closed(self, exact=False): # pragma: no cover ) } if len(directed_edges) == 3 * self.data.size: - undirected_edges = {frozenset((edge[:3], edge[3:])) for edge in - directed_edges} + undirected_edges = { + frozenset((edge[:3], edge[3:])) for edge in directed_edges + } if len(directed_edges) == 2 * len(undirected_edges): return True else: - self.warning(''' + self.warning( + """ Use of not exact is_closed check. This check can lead to misleading results. You could try to use `exact=True`. 
See: - false positive: https://github.com/wolph/numpy-stl/issues/198 - false negative: https://github.com/wolph/numpy-stl/pull/213 - '''.strip() - ) - normals = numpy.asarray(self.normals, dtype=numpy.float64) + """.strip() + ) + normals = np.asarray(self.normals, dtype=np.float64) allowed_max_errors = ( - numpy.abs(normals).sum(axis=0) * numpy.finfo( - numpy.float32).eps + np.abs(normals).sum(axis=0) * np.finfo(np.float32).eps ) - if (numpy.abs(normals.sum(axis=0)) <= allowed_max_errors).all(): + if (np.abs(normals.sum(axis=0)) <= allowed_max_errors).all(): return True self.warning( - ''' + """ Your mesh is not closed, the mass methods will not function correctly on this mesh. For more info: https://github.com/WoLpH/numpy-stl/issues/69 - '''.strip() + """.strip() ) return False def get_mass_properties(self): - ''' + """ Evaluate and return a tuple with the following elements: - the volume - the position of the center of gravity (COG) @@ -418,7 +448,7 @@ def get_mass_properties(self): Documentation can be found here: http://www.geometrictools.com/Documentation/PolyhedralMassProperties.pdf - ''' + """ self.check(True) def subexpression(x): @@ -445,18 +475,18 @@ def subexpression(x): f1y, f2y, f3y, g0y, g1y, g2y = subexpression(self.y) f1z, f2z, f3z, g0z, g1z, g2z = subexpression(self.z) - intg = numpy.zeros((10)) + intg = np.zeros(10) intg[0] = sum(d0 * f1x) intg[1:4] = sum(d0 * f2x), sum(d1 * f2y), sum(d2 * f2z) intg[4:7] = sum(d0 * f3x), sum(d1 * f3y), sum(d2 * f3z) intg[7] = sum(d0 * (y0 * g0x + y1 * g1x + y2 * g2x)) intg[8] = sum(d1 * (z0 * g0y + z1 * g1y + z2 * g2y)) intg[9] = sum(d2 * (x0 * g0z + x1 * g1z + x2 * g2z)) - intg /= numpy.array([6, 24, 24, 24, 60, 60, 60, 120, 120, 120]) + intg /= np.array([6, 24, 24, 24, 60, 60, 60, 120, 120, 120]) volume = intg[0] cog = intg[1:4] / volume - cogsq = cog ** 2 - inertia = numpy.zeros((3, 3)) + cogsq = cog**2 + inertia = np.zeros((3, 3)) inertia[0, 0] = intg[5] + intg[6] - volume * (cogsq[1] + cogsq[2]) inertia[1, 1] 
= intg[4] + intg[6] - volume * (cogsq[2] + cogsq[0]) inertia[2, 2] = intg[4] + intg[5] - volume * (cogsq[0] + cogsq[1]) @@ -471,19 +501,21 @@ def update_units(self): areas = self.areas if non_zero_areas.shape[0] != areas.shape[0]: # pragma: no cover - self.warning('Zero sized areas found, ' - 'units calculation will be partially incorrect') + self.warning( + 'Zero sized areas found, ' + 'units calculation will be partially incorrect' + ) if non_zero_areas.any(): non_zero_areas.shape = non_zero_areas.shape[0] - areas = numpy.hstack((2 * areas[non_zero_areas],) * DIMENSIONS) + areas = np.hstack((2 * areas[non_zero_areas],) * DIMENSIONS) units[non_zero_areas] /= areas self.units = units @classmethod def rotation_matrix(cls, axis, theta): - ''' + """ Generate a rotation matrix to Rotate the matrix over the given axis by the given theta (angle) @@ -494,18 +526,18 @@ def rotation_matrix(cls, axis, theta): :param numpy.array axis: Axis to rotate over (x, y, z) :param float theta: Rotation angle in radians, use `math.radians` to convert degrees to radians if needed. 
- ''' - axis = numpy.asarray(axis) + """ + axis = np.asarray(axis) # No need to rotate if there is no actual rotation if not axis.any(): - return numpy.identity(3) + return np.identity(3) - theta = 0.5 * numpy.asarray(theta) + theta = 0.5 * np.asarray(theta) - axis = axis / numpy.linalg.norm(axis) + axis = axis / np.linalg.norm(axis) a = math.cos(theta) - b, c, d = - axis * math.sin(theta) + b, c, d = -axis * math.sin(theta) angles = a, b, c, d powers = [x * y for x in angles for y in angles] aa, ab, ac, ad = powers[0:4] @@ -513,12 +545,16 @@ def rotation_matrix(cls, axis, theta): ca, cb, cc, cd = powers[8:12] da, db, dc, dd = powers[12:16] - return numpy.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], - [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], - [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) + return np.array( + [ + [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], + [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], + [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc], + ] + ) def rotate(self, axis, theta=0, point=None): - ''' + """ Rotate the matrix over the given axis by the given theta (angle) Uses the :py:func:`rotation_matrix` in the background. @@ -533,7 +569,7 @@ def rotate(self, axis, theta=0, point=None): convert degrees to radians if needed. :param numpy.array point: Rotation point so manual translation is not required - ''' + """ # No need to rotate if there is no actual rotation if not theta: return @@ -541,26 +577,26 @@ def rotate(self, axis, theta=0, point=None): self.rotate_using_matrix(self.rotation_matrix(axis, theta), point) def rotate_using_matrix(self, rotation_matrix, point=None): - ''' + """ Rotate using a given rotation matrix and optional rotation point Note that this rotation produces clockwise rotations for positive angles which is arguably incorrect but will remain for legacy reasons. 
For more details, read here: https://github.com/WoLpH/numpy-stl/issues/166 - ''' + """ - identity = numpy.identity(rotation_matrix.shape[0]) + identity = np.identity(rotation_matrix.shape[0]) # No need to rotate if there is no actual rotation if not rotation_matrix.any() or (identity == rotation_matrix).all(): return - if isinstance(point, (numpy.ndarray, list, tuple)) and len(point) == 3: - point = numpy.asarray(point) + if isinstance(point, (np.ndarray, list, tuple)) and len(point) == 3: + point = np.asarray(point) elif point is None: - point = numpy.array([0, 0, 0]) + point = np.array([0, 0, 0]) elif isinstance(point, (int, float)): - point = numpy.asarray([point] * 3) + point = np.asarray([point] * 3) else: raise TypeError('Incorrect type for point', point) @@ -580,18 +616,18 @@ def _rotate(matrix): self.vectors[:, i] = _rotate(self.vectors[:, i]) def translate(self, translation): - ''' + """ Translate the mesh in the three directions :param numpy.array translation: Translation vector (x, y, z) - ''' + """ assert len(translation) == 3, 'Translation vector must be of length 3' self.x += translation[0] self.y += translation[1] self.z += translation[2] def transform(self, matrix): - ''' + """ Transform the mesh with a rotation and a translation stored in a single 4x4 matrix @@ -600,42 +636,31 @@ def transform(self, matrix): part of the transformation matrix[0:3, 3] represents the translation part of the transformation - ''' + """ is_a_4x4_matrix = matrix.shape == (4, 4) assert is_a_4x4_matrix, 'Transformation matrix must be of shape (4, 4)' rotation = matrix[0:3, 0:3] - unit_det_rotation = numpy.allclose(numpy.linalg.det(rotation), 1.0) + unit_det_rotation = np.allclose(np.linalg.det(rotation), 1.0) assert unit_det_rotation, 'Rotation matrix has not a unit determinant' for i in range(3): - self.vectors[:, i] = numpy.dot(rotation, self.vectors[:, i].T).T + self.vectors[:, i] = np.dot(rotation, self.vectors[:, i].T).T self.x += matrix[0, 3] self.y += matrix[1, 3] 
self.z += matrix[2, 3] - def _get_or_update(key): - def _get(self): - if not hasattr(self, '_%s' % key): - getattr(self, 'update_%s' % key)() - return getattr(self, '_%s' % key) - - return _get - - def _set(key): - def _set(self, value): - setattr(self, '_%s' % key, value) - - return _set - - min_ = property(_get_or_update('min'), _set('min'), - doc='Mesh minimum value') - max_ = property(_get_or_update('max'), _set('max'), - doc='Mesh maximum value') - areas = property(_get_or_update('areas'), _set('areas'), - doc='Mesh areas') - centroids = property(_get_or_update('centroids'), _set('centroids'), - doc='Mesh centroids') - units = property(_get_or_update('units'), _set('units'), - doc='Mesh unit vectors') + min_ = property( + _get_or_update('min'), _set('min'), doc='Mesh minimum value' + ) + max_ = property( + _get_or_update('max'), _set('max'), doc='Mesh maximum value' + ) + areas = property(_get_or_update('areas'), _set('areas'), doc='Mesh areas') + centroids = property( + _get_or_update('centroids'), _set('centroids'), doc='Mesh centroids' + ) + units = property( + _get_or_update('units'), _set('units'), doc='Mesh unit vectors' + ) def __getitem__(self, k): return self.points[k] @@ -647,15 +672,14 @@ def __len__(self): return self.points.shape[0] def __iter__(self): - for point in self.points: - yield point + yield from self.points def __repr__(self): return f'' def get_mass_properties_with_density(self, density): # add density for mesh,density unit kg/m3 when mesh is unit is m - self.check() + self.check(True) def subexpression(x): w0, w1, w2 = x[:, 0], x[:, 1], x[:, 2] @@ -681,31 +705,37 @@ def subexpression(x): f1y, f2y, f3y, g0y, g1y, g2y = subexpression(self.y) f1z, f2z, f3z, g0z, g1z, g2z = subexpression(self.z) - intg = numpy.zeros((10)) + intg = np.zeros(10) intg[0] = sum(d0 * f1x) intg[1:4] = sum(d0 * f2x), sum(d1 * f2y), sum(d2 * f2z) intg[4:7] = sum(d0 * f3x), sum(d1 * f3y), sum(d2 * f3z) intg[7] = sum(d0 * (y0 * g0x + y1 * g1x + y2 * g2x)) intg[8] = 
sum(d1 * (z0 * g0y + z1 * g1y + z2 * g2y)) intg[9] = sum(d2 * (x0 * g0z + x1 * g1z + x2 * g2z)) - intg /= numpy.array([6, 24, 24, 24, 60, 60, 60, 120, 120, 120]) + intg /= np.array([6, 24, 24, 24, 60, 60, 60, 120, 120, 120]) volume = intg[0] cog = intg[1:4] / volume - cogsq = cog ** 2 + cogsq = cog**2 vmass = volume * density - inertia = numpy.zeros((3, 3)) + inertia = np.zeros((3, 3)) inertia[0, 0] = (intg[5] + intg[6]) * density - vmass * ( - cogsq[1] + cogsq[2]) + cogsq[1] + cogsq[2] + ) inertia[1, 1] = (intg[4] + intg[6]) * density - vmass * ( - cogsq[2] + cogsq[0]) + cogsq[2] + cogsq[0] + ) inertia[2, 2] = (intg[4] + intg[5]) * density - vmass * ( - cogsq[0] + cogsq[1]) + cogsq[0] + cogsq[1] + ) inertia[0, 1] = inertia[1, 0] = -( - intg[7] * density - vmass * cog[0] * cog[1]) + intg[7] * density - vmass * cog[0] * cog[1] + ) inertia[1, 2] = inertia[2, 1] = -( - intg[8] * density - vmass * cog[1] * cog[2]) + intg[8] * density - vmass * cog[1] * cog[2] + ) inertia[0, 2] = inertia[2, 0] = -( - intg[9] * density - vmass * cog[2] * cog[0]) + intg[9] * density - vmass * cog[2] * cog[0] + ) return volume, vmass, cog, inertia diff --git a/stl/main.py b/stl/main.py index a3ee185..73e9798 100644 --- a/stl/main.py +++ b/stl/main.py @@ -8,26 +8,38 @@ def _get_parser(description): parser = argparse.ArgumentParser(description=description) parser.add_argument( - 'infile', nargs='?', type=argparse.FileType('rb'), - default=sys.stdin, help='STL file to read' + 'infile', + nargs='?', + type=argparse.FileType('rb'), + default=sys.stdin, + help='STL file to read', ) parser.add_argument( - 'outfile', nargs='?', type=argparse.FileType('wb'), - default=sys.stdout, help='STL file to write' + 'outfile', + nargs='?', + type=argparse.FileType('wb'), + default=sys.stdout, + help='STL file to write', ) parser.add_argument('--name', nargs='?', help='Name of the mesh') parser.add_argument( - '-n', '--use-file-normals', action='store_true', - help='Read the normals from the file instead of 
recalculating them' + '-n', + '--use-file-normals', + action='store_true', + help='Read the normals from the file instead of recalculating them', ) parser.add_argument( - '-r', '--remove-empty-areas', action='store_true', + '-r', + '--remove-empty-areas', + action='store_true', help='Remove areas with 0 surface areas to prevent errors during ' - 'normal calculation' + 'normal calculation', ) parser.add_argument( - '-s', '--disable-speedups', action='store_true', - help='Disable Cython speedups' + '-s', + '--disable-speedups', + action='store_true', + help='Disable Cython speedups', ) return parser @@ -50,17 +62,22 @@ def _get_name(args): continue else: return name + return None # pragma: no cover def main(): parser = _get_parser('Convert STL files from ascii to binary and back') parser.add_argument( - '-a', '--ascii', action='store_true', - help='Write ASCII file (default is binary)' + '-a', + '--ascii', + action='store_true', + help='Write ASCII file (default is binary)', ) parser.add_argument( - '-b', '--binary', action='store_true', - help='Force binary file (for TTYs)' + '-b', + '--binary', + action='store_true', + help='Force binary file (for TTYs)', ) args = parser.parse_args() @@ -70,7 +87,7 @@ def main(): fh=args.infile, calculate_normals=False, remove_empty_areas=args.remove_empty_areas, - speedups=not args.disable_speedups + speedups=not args.disable_speedups, ) if args.binary: @@ -81,8 +98,7 @@ def main(): mode = stl.AUTOMATIC stl_file.save( - name, args.outfile, mode=mode, - update_normals=not args.use_file_normals + name, args.outfile, mode=mode, update_normals=not args.use_file_normals ) @@ -91,14 +107,17 @@ def to_ascii(): args = parser.parse_args() name = _get_name(args) stl_file = stl.StlMesh( - filename=name, fh=args.infile, + filename=name, + fh=args.infile, calculate_normals=False, remove_empty_areas=args.remove_empty_areas, - speedups=not args.disable_speedups + speedups=not args.disable_speedups, ) stl_file.save( - name, args.outfile, 
mode=stl.ASCII, - update_normals=not args.use_file_normals + name, + args.outfile, + mode=stl.ASCII, + update_normals=not args.use_file_normals, ) @@ -107,12 +126,15 @@ def to_binary(): args = parser.parse_args() name = _get_name(args) stl_file = stl.StlMesh( - filename=name, fh=args.infile, + filename=name, + fh=args.infile, calculate_normals=False, remove_empty_areas=args.remove_empty_areas, - speedups=not args.disable_speedups + speedups=not args.disable_speedups, ) stl_file.save( - name, args.outfile, mode=stl.BINARY, - update_normals=not args.use_file_normals + name, + args.outfile, + mode=stl.BINARY, + update_normals=not args.use_file_normals, ) diff --git a/stl/mesh.py b/stl/mesh.py index b2af7d6..b713202 100644 --- a/stl/mesh.py +++ b/stl/mesh.py @@ -3,4 +3,3 @@ class Mesh(stl.BaseStl): pass - diff --git a/stl/stl.py b/stl/stl.py index d44ae8c..051167b 100644 --- a/stl/stl.py +++ b/stl/stl.py @@ -1,14 +1,18 @@ +# type: ignore[reportAttributeAccessIssue] import datetime import enum import io import os import struct import zipfile -from xml.etree import ElementTree +from xml.etree import ElementTree as ET -import numpy +import numpy as np -from . import __about__ as metadata, base +from . import ( + __about__ as metadata, + base, +) from .utils import b try: @@ -45,19 +49,18 @@ class Mode(enum.IntEnum): class BaseStl(base.BaseMesh): - @classmethod def load(cls, fh, mode=AUTOMATIC, speedups=True): - '''Load Mesh from STL file + """Load Mesh from STL file Automatically detects binary versus ascii STL files. 
:param file fh: The file handle to open :param int mode: Automatically detect the filetype or force binary - ''' + """ header = fh.read(HEADER_SIZE) if not header: - return + return None if isinstance(header, str): # pragma: no branch header = b(header) @@ -65,18 +68,14 @@ def load(cls, fh, mode=AUTOMATIC, speedups=True): if mode is AUTOMATIC: if header.lstrip().lower().startswith(b'solid'): try: - name, data = cls._load_ascii( - fh, header, speedups=speedups - ) + name, data = cls._load_ascii(fh, header, speedups=speedups) except RuntimeError as exception: - print('exception', exception) (recoverable, e) = exception.args # If we didn't read beyond the header the stream is still # readable through the binary reader if recoverable: name, data = cls._load_binary( - fh, header, - check_size=False + fh, header, check_size=False ) else: # Apparently we've read beyond the header. Let's try @@ -89,8 +88,7 @@ def load(cls, fh, mode=AUTOMATIC, speedups=True): # not 100% certain it's binary, check the size while # reading name, data = cls._load_binary( - fh, header, - check_size=True + fh, header, check_size=True ) else: name, data = cls._load_binary(fh, header) @@ -108,11 +106,12 @@ def _load_binary(cls, fh, header, check_size=False): if len(count_data) != COUNT_SIZE: count = 0 else: - count, = struct.unpack(' 0: position = fh.tell() @@ -184,15 +182,14 @@ def get(prefix=''): else: raise RuntimeError( recoverable[0], - '%r should start with %r' % (line, prefix) + f'{line!r} should start with {prefix!r}', ) if len(values) == 3: return [float(v) for v in values] else: # pragma: no cover raise RuntimeError( - recoverable[0], - 'Incorrect value %r' % line + recoverable[0], f'Incorrect value {line!r}' ) else: return b(raw_line) @@ -200,8 +197,7 @@ def get(prefix=''): line = get() if not lines: raise RuntimeError( - recoverable[0], - 'No lines found, impossible to read' + recoverable[0], 'No lines found, impossible to read' ) # Yield the name @@ -224,8 +220,8 @@ def 
get(prefix=''): assert get().lower() == b('endfacet') attrs = 0 yield (normals, (v0, v1, v2), attrs) - except AssertionError as e: # pragma: no cover - raise RuntimeError(recoverable[0], e) + except AssertionError as e: # pragma: no cover # noqa: PERF203 + raise RuntimeError(recoverable[0], e) from e except StopIteration: return @@ -243,10 +239,10 @@ def _load_ascii(cls, fh, header, speedups=True): else: iterator = cls._ascii_reader(fh, header) name = next(iterator) - return name, numpy.fromiter(iterator, dtype=cls.dtype) + return name, np.fromiter(iterator, dtype=cls.dtype) - def save(self, filename, fh=None, mode=AUTOMATIC, update_normals=True): - '''Save the STL to a (binary) file + def save(self, filename, fh=None, mode=AUTOMATIC, update_normals=True): # noqa: C901 + """Save the STL to a (binary) file If mode is :py:data:`AUTOMATIC` an :py:data:`ASCII` file will be written if the output is a TTY and a :py:data:`BINARY` file otherwise. @@ -255,7 +251,7 @@ def save(self, filename, fh=None, mode=AUTOMATIC, update_normals=True): :param file fh: The file handle to open :param int mode: The mode to write, default is :py:data:`AUTOMATIC`. :param bool update_normals: Whether to update the normals - ''' + """ assert filename, 'Filename is required for the STL headers' if update_normals: self.update_normals() @@ -268,7 +264,7 @@ def save(self, filename, fh=None, mode=AUTOMATIC, update_normals=True): write = self._write_ascii else: write = self._write_binary - except IOError: + except OSError: # If TTY checking fails then it's an io.BytesIO() (or one # of its siblings from io). Assume binary. 
write = self._write_binary @@ -279,14 +275,14 @@ def save(self, filename, fh=None, mode=AUTOMATIC, update_normals=True): elif mode is ASCII: write = self._write_ascii else: - raise ValueError('Mode %r is invalid' % mode) + raise ValueError(f'Mode {mode!r} is invalid') if isinstance(fh, io.TextIOBase): # Provide a more helpful error if the user mistakenly # assumes ASCII files should be text files. raise TypeError( - "File handles should be in binary mode - even when" - " writing an ASCII STL." + 'File handles should be in binary mode - even when' + ' writing an ASCII STL.' ) name = self.name @@ -299,7 +295,7 @@ def save(self, filename, fh=None, mode=AUTOMATIC, update_normals=True): else: with open(filename, 'wb') as fh: write(fh, name) - except IOError: # pragma: no cover + except OSError: # pragma: no cover pass def _write_ascii(self, fh, name): @@ -312,22 +308,41 @@ def _write_ascii(self, fh, name): if _speedups and speedups: # pragma: no cover _speedups.ascii_write(fh, b(name), self.data) else: + def p(s, file): - file.write(b('%s\n' % s)) + file.write(b(f'{s}\n')) - p('solid %s' % name, file=fh) + p(f'solid {name}', file=fh) for row in self.data: + # Explicitly convert each component to standard float for + # normals and vertices to be compatible with numpy 2.x + normals = tuple(float(n) for n in row['normals']) vectors = row['vectors'] - p('facet normal %r %r %r' % tuple(row['normals']), file=fh) + p('facet normal {:f} {:f} {:f}'.format(*normals), file=fh) p(' outer loop', file=fh) - p(' vertex %r %r %r' % tuple(vectors[0]), file=fh) - p(' vertex %r %r %r' % tuple(vectors[1]), file=fh) - p(' vertex %r %r %r' % tuple(vectors[2]), file=fh) + p( + ' vertex {:f} {:f} {:f}'.format( + *tuple(float(v) for v in vectors[0]) + ), + file=fh, + ) + p( + ' vertex {:f} {:f} {:f}'.format( + *tuple(float(v) for v in vectors[1]) + ), + file=fh, + ) + p( + ' vertex {:f} {:f} {:f}'.format( + *tuple(float(v) for v in vectors[2]) + ), + file=fh, + ) p(' endloop', file=fh) 
p('endfacet', file=fh) - p('endsolid %s' % name, file=fh) + p(f'endsolid {name}', file=fh) def get_header(self, name): # Format the header @@ -366,42 +381,48 @@ def _write_binary(self, fh, name): if self.data.size: # pragma: no cover assert fh.tell() > 84, ( 'numpy silently refused to write our file. Note that writing ' - 'to `StringIO` objects is not supported by `numpy`') + 'to `StringIO` objects is not supported by `numpy`' + ) @classmethod def from_file( - cls, filename, calculate_normals=True, fh=None, - mode=Mode.AUTOMATIC, speedups=True, **kwargs + cls, + filename, + calculate_normals=True, + fh=None, + mode=Mode.AUTOMATIC, + speedups=True, + **kwargs, ): - '''Load a mesh from a STL file + """Load a mesh from a STL file :param str filename: The file to load :param bool calculate_normals: Whether to update the normals :param file fh: The file handle to open :param dict kwargs: The same as for :py:class:`stl.mesh.Mesh` - ''' + """ if fh: - name, data = cls.load( - fh, mode=mode, speedups=speedups - ) + name, data = cls.load(fh, mode=mode, speedups=speedups) else: with open(filename, 'rb') as fh: - name, data = cls.load( - fh, mode=mode, speedups=speedups - ) + name, data = cls.load(fh, mode=mode, speedups=speedups) return cls( - data, calculate_normals, name=name, - speedups=speedups, **kwargs + data, calculate_normals, name=name, speedups=speedups, **kwargs ) @classmethod def from_multi_file( - cls, filename, calculate_normals=True, fh=None, - mode=Mode.AUTOMATIC, speedups=True, **kwargs + cls, + filename, + calculate_normals=True, + fh=None, + mode=Mode.AUTOMATIC, + speedups=True, + **kwargs, ): - '''Load multiple meshes from a STL file + """Load multiple meshes from a STL file Note: mode is hardcoded to ascii since binary stl files do not support the multi format @@ -410,11 +431,11 @@ def from_multi_file( :param bool calculate_normals: Whether to update the normals :param file fh: The file handle to open :param dict kwargs: The same as for 
:py:class:`stl.mesh.Mesh` - ''' + """ if fh: close = False else: - fh = open(filename, 'rb') + fh = open(filename, 'rb') # noqa: SIM115 close = True try: @@ -422,13 +443,13 @@ def from_multi_file( while raw_data: name, data = raw_data yield cls( - data, calculate_normals, name=name, - speedups=speedups, **kwargs - ) - raw_data = cls.load( - fh, mode=ASCII, - speedups=speedups + data, + calculate_normals, + name=name, + speedups=speedups, + **kwargs, ) + raw_data = cls.load(fh, mode=ASCII, speedups=speedups) finally: if close: @@ -436,10 +457,14 @@ def from_multi_file( @classmethod def from_files( - cls, filenames, calculate_normals=True, mode=Mode.AUTOMATIC, - speedups=True, **kwargs + cls, + filenames, + calculate_normals=True, + mode=Mode.AUTOMATIC, + speedups=True, + **kwargs, ): - '''Load multiple meshes from STL files into a single mesh + """Load multiple meshes from STL files into a single mesh Note: mode is hardcoded to ascii since binary stl files do not support the multi format @@ -448,20 +473,19 @@ def from_files( :param bool calculate_normals: Whether to update the normals :param file fh: The file handle to open :param dict kwargs: The same as for :py:class:`stl.mesh.Mesh` - ''' - meshes = [] - for filename in filenames: - meshes.append( - cls.from_file( - filename, - calculate_normals=calculate_normals, - mode=mode, - speedups=speedups, - **kwargs - ) + """ + meshes = [ + cls.from_file( + filename, + calculate_normals=calculate_normals, + mode=mode, + speedups=speedups, + **kwargs, ) + for filename in filenames + ] - data = numpy.concatenate([mesh.data for mesh in meshes]) + data = np.concatenate([mesh.data for mesh in meshes]) return cls(data, calculate_normals=calculate_normals, **kwargs) @classmethod @@ -469,16 +493,16 @@ def from_3mf_file(cls, filename, calculate_normals=True, **kwargs): with zipfile.ZipFile(filename) as zip: with zip.open('_rels/.rels') as rels_fh: model = None - root = ElementTree.parse(rels_fh).getroot() + root = 
ET.parse(rels_fh).getroot()
                 for child in root:  # pragma: no branch
                     type_ = child.attrib.get('Type', '')
                     if type_.endswith('3dmodel'):  # pragma: no branch
                         model = child.attrib.get('Target', '')
                         break
 
-                assert model, 'No 3D model found in %s' % filename
+                assert model, f'No 3D model found in {filename}'
 
             with zip.open(model.lstrip('/')) as fh:
-                root = ElementTree.parse(fh).getroot()
+                root = ET.parse(fh).getroot()
 
             elements = root.findall('./{*}resources/{*}object/{*}mesh')
             for mesh_element in elements:  # pragma: no branch
@@ -490,15 +514,19 @@ def from_3mf_file(cls, filename, calculate_normals=True, **kwargs):
                     if tag.endswith('vertices'):
                         # Collect all the vertices
                         for vertice in element:
-                            a = {k: float(v) for k, v in
-                                 vertice.attrib.items()}
+                            a = {
+                                k: float(v)
+                                for k, v in vertice.attrib.items()
+                            }
                             vertices.append([a['x'], a['y'], a['z']])
                     elif tag.endswith('triangles'):  # pragma: no branch
                         # Map the triangles to the vertices and collect
                         for triangle in element:
-                            a = {k: int(v) for k, v in
-                                 triangle.attrib.items()}
+                            a = {
+                                k: int(v)
+                                for k, v in triangle.attrib.items()
+                            }
                             triangles.append(
                                 [
                                     vertices[a['v1']],
@@ -507,8 +535,8 @@ def from_3mf_file(cls, filename, calculate_normals=True, **kwargs):
                                 ]
                             )
 
-            mesh = cls(numpy.zeros(len(triangles), dtype=cls.dtype))
-            mesh.vectors[:] = numpy.array(triangles)
+            mesh = cls(np.zeros(len(triangles), dtype=cls.dtype))
+            mesh.vectors[:] = np.array(triangles)
 
             yield mesh
diff --git a/tests/stl_corruption.py b/tests/stl_corruption.py
index cac14f9..6a9c0bf 100644
--- a/tests/stl_corruption.py
+++ b/tests/stl_corruption.py
@@ -1,12 +1,12 @@
-from __future__ import print_function
+import struct
 import sys
-import numpy
+
+import numpy as np
 import pytest
-import struct
 
 from stl import mesh
 
-_STL_FILE = '''
+_STL_FILE = """
 solid test.stl
   facet normal -0.014565 0.073223 -0.002897
     outer loop
@@ -16,7 +16,7 @@
     endloop
   endfacet
 endsolid test.stl
-'''.lstrip()
+""".lstrip()
 
 
 def test_valid_ascii(tmpdir, speedups):
@@ -51,7
+51,7 @@ def test_ascii_with_missing_name(tmpdir, speedups): def test_ascii_with_blank_lines(tmpdir, speedups): - _stl_file = ''' + _stl_file = """ solid test.stl @@ -69,7 +69,7 @@ def test_ascii_with_blank_lines(tmpdir, speedups): endfacet endsolid test.stl - '''.lstrip() + """.lstrip() tmp_file = tmpdir.join('tmp.stl') with tmp_file.open('w+') as fh: @@ -140,15 +140,9 @@ def test_corrupt_binary_file(tmpdir, speedups): def test_duplicate_polygons(): - data = numpy.zeros(3, dtype=mesh.Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 0, 0], - [1, 0, 0], - [0, 1, 1.]]) - data['vectors'][0] = numpy.array([[0, 0, 0], - [2, 0, 0], - [0, 2, 1.]]) - data['vectors'][0] = numpy.array([[0, 0, 0], - [3, 0, 0], - [0, 3, 1.]]) - - assert not mesh.Mesh(data, remove_empty_areas=False).check() + data = np.zeros(3, dtype=mesh.Mesh.dtype) + data['vectors'][0] = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 1.0]]) + data['vectors'][0] = np.array([[0, 0, 0], [2, 0, 0], [0, 2, 1.0]]) + data['vectors'][0] = np.array([[0, 0, 0], [3, 0, 0], [0, 3, 1.0]]) + + assert not mesh.Mesh(data, remove_empty_areas=False).check() # type: ignore[reportAttributeAccessIssue] diff --git a/tests/test_ascii.py b/tests/test_ascii.py index 229b90d..c703037 100644 --- a/tests/test_ascii.py +++ b/tests/test_ascii.py @@ -1,16 +1,17 @@ -import os -import sys +# type: ignore[reportAttributeAccessIssue] +import io import locale -import pytest +import os import pathlib -import warnings import subprocess -import io -import numpy +import sys +import warnings -from stl.utils import b -from stl import mesh, Mode +import numpy as np +import pytest +from stl import Mode, mesh +from stl.utils import b FILES_PATH = pathlib.Path(__file__).parent / 'stl_tests' @@ -22,8 +23,9 @@ def test_ascii_file(speedups): def test_chinese_name(tmpdir, speedups): name = 'Test Chinese name 月球' - _stl_file = (''' - solid %s + _stl_file = ( + f""" + solid {name} facet normal -0.014565 0.073223 -0.002897 outer loop vertex 0.399344 0.461940 
1.044090 @@ -32,14 +34,16 @@ def test_chinese_name(tmpdir, speedups): endloop endfacet endsolid - ''' % name).lstrip() + """ + ).lstrip() tmp_file = tmpdir.join('tmp.stl') with tmp_file.open('wb+') as fh: fh.write(b(_stl_file)) fh.seek(0) - test_mesh = mesh.Mesh.from_file(str(tmp_file), fh=fh, - speedups=speedups) + test_mesh = mesh.Mesh.from_file( + str(tmp_file), fh=fh, speedups=speedups + ) if speedups: assert test_mesh.name.lower() == b(name).lower() else: @@ -49,8 +53,9 @@ def test_chinese_name(tmpdir, speedups): def test_long_name(tmpdir, speedups): name = 'Just Some Very Long Name which will not fit within the standard' name += name - _stl_file = (''' - solid %s + _stl_file = ( + f""" + solid {name} facet normal -0.014565 0.073223 -0.002897 outer loop vertex 0.399344 0.461940 1.044090 @@ -59,14 +64,16 @@ def test_long_name(tmpdir, speedups): endloop endfacet endsolid - ''' % name).lstrip() + """ + ).lstrip() tmp_file = tmpdir.join('tmp.stl') with tmp_file.open('wb+') as fh: fh.write(b(_stl_file)) fh.seek(0) - test_mesh = mesh.Mesh.from_file(str(tmp_file), fh=fh, - speedups=speedups) + test_mesh = mesh.Mesh.from_file( + str(tmp_file), fh=fh, speedups=speedups + ) if speedups: assert test_mesh.name.lower() == b(name).lower() @@ -77,8 +84,9 @@ def test_long_name(tmpdir, speedups): def test_scientific_notation(tmpdir, speedups): name = 'just some very long name which will not fit within the standard' name += name - _stl_file = (''' - solid %s + _stl_file = ( + f""" + solid {name} facet normal 1.014565e-10 7.3223e-5 -10 outer loop vertex 0.399344 0.461940 1.044090e-5 @@ -87,19 +95,22 @@ def test_scientific_notation(tmpdir, speedups): endloop endfacet endsolid - ''' % name).lstrip() + """ + ).lstrip() tmp_file = tmpdir.join('tmp.stl') with tmp_file.open('wb+') as fh: fh.write(b(_stl_file)) fh.seek(0) - test_mesh = mesh.Mesh.from_file(str(tmp_file), fh=fh, - speedups=speedups) + test_mesh = mesh.Mesh.from_file( + str(tmp_file), fh=fh, speedups=speedups + ) assert 
test_mesh.name == b(name) -@pytest.mark.skipif(sys.platform.startswith('win'), - reason='Only makes sense on Unix') +@pytest.mark.skipif( + sys.platform.startswith('win'), reason='Only makes sense on Unix' +) def test_locale_restore(speedups): if not speedups: pytest.skip('Only makes sense with speedups') @@ -113,14 +124,15 @@ def test_locale_restore(speedups): assert old_locale == new_locale -@pytest.mark.skipif(sys.platform.startswith('win'), - reason='Only makes sense on Unix') +@pytest.mark.skipif( + sys.platform.startswith('win'), reason='Only makes sense on Unix' +) def test_use_with_qt_with_custom_locale_decimal_delimeter(speedups): if not speedups: pytest.skip('Only makes sense with speedups') venv = os.environ.get('VIRTUAL_ENV', '') - if (3, 6) == sys.version_info[:2] and venv.startswith('/home/travis/'): + if sys.version_info[:2] == (3, 6) and venv.startswith('/home/travis/'): pytest.skip('PySide2/PyQt5 tests are broken on Travis Python 3.6') try: @@ -132,6 +144,7 @@ def test_use_with_qt_with_custom_locale_decimal_delimeter(speedups): warnings.warn( 'Unable to import PySide2/PyQt5, skipping locale tests', ImportWarning, + stacklevel=1, ) pytest.skip('PySide2/PyQt5 missing') assert QtWidgets @@ -146,11 +159,13 @@ def test_use_with_qt_with_custom_locale_decimal_delimeter(speedups): if sys.platform.startswith('linux'): prefix = ('xvfb-run', '-a') - p = subprocess.Popen(prefix + (sys.executable, script_path), - env=env, - universal_newlines=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + p = subprocess.Popen( + (*prefix, sys.executable, script_path), + env=env, + universal_newlines=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) out, err = p.communicate() # Unable to read the file with speedups, retrying @@ -165,21 +180,30 @@ def test_use_with_qt_with_custom_locale_decimal_delimeter(speedups): def test_ascii_io(): # Create a vanilla mesh. 
- mesh_ = mesh.Mesh(numpy.empty(3, mesh.Mesh.dtype)) - mesh_.vectors = numpy.arange(27).reshape((3, 3, 3)) + mesh_ = mesh.Mesh(np.empty(3, mesh.Mesh.dtype)) + mesh_.vectors = np.arange(27).reshape((3, 3, 3)) # Check that unhelpful 'expected str but got bytes' error is caught and # replaced. - with pytest.raises(TypeError, match="handles should be in binary mode"): - mesh_.save("nameless", fh=io.StringIO(), mode=Mode.ASCII) + with pytest.raises(TypeError, match='handles should be in binary mode'): + mesh_.save('nameless', fh=io.StringIO(), mode=Mode.ASCII) # Write to an io.BytesIO(). fh = io.BytesIO() - mesh_.save("nameless", fh=fh, mode=Mode.ASCII) + mesh_.save('nameless', fh=fh, mode=Mode.ASCII) # Assert binary file is still only ascii characters. - fh.getvalue().decode("ascii") + fh.getvalue().decode('ascii') + + import tempfile + + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + # Save the mesh to the temporary file + mesh_.save(temp_file.name, mode=Mode.ASCII) + + # Read the mesh back from the temporary file + read = mesh.Mesh.from_file(temp_file.name) # Read the mesh back in. - read = mesh.Mesh.from_file("anonymous.stl", fh=io.BytesIO(fh.getvalue())) + read = mesh.Mesh.from_file('anonymous.stl', fh=io.BytesIO(fh.getvalue())) # Check what comes out is the same as what went in. 
- assert numpy.allclose(mesh_.vectors, read.vectors) + assert np.allclose(mesh_.vectors, read.vectors) diff --git a/tests/test_binary.py b/tests/test_binary.py index 6740d5d..43b5170 100644 --- a/tests/test_binary.py +++ b/tests/test_binary.py @@ -1,9 +1,10 @@ import io -import numpy -import pytest import pathlib -from stl import mesh, Mode +import numpy as np +import pytest + +from stl import Mode, mesh TESTS_PATH = pathlib.Path(__file__).parent @@ -26,17 +27,14 @@ def _test(tmpdir, speedups, mode, use_filehandle=True): filename = TESTS_PATH / 'stl_binary' / 'rear_case.stl' if use_filehandle: with open(filename, 'rb') as fh: - mesh.Mesh.from_file(filename, fh=fh, speedups=speedups, - mode=mode) + mesh.Mesh.from_file(filename, fh=fh, speedups=speedups, mode=mode) with open(filename, 'rb') as fh: # Test with BytesIO fh = io.BytesIO(fh.read()) - mesh.Mesh.from_file(filename, fh=fh, speedups=speedups, - mode=mode) + mesh.Mesh.from_file(filename, fh=fh, speedups=speedups, mode=mode) else: - mesh.Mesh.from_file(filename, - speedups=speedups, mode=mode) + mesh.Mesh.from_file(filename, speedups=speedups, mode=mode) @pytest.mark.parametrize('mode', [Mode.BINARY, Mode.AUTOMATIC]) @@ -51,7 +49,7 @@ def test_write_bytes_io(binary_file, mode): assert fh.getvalue()[84:] == mesh_.data.tobytes() read = mesh.Mesh.from_file('nameless', fh=io.BytesIO(fh.getvalue())) - assert numpy.allclose(read.vectors, mesh_.vectors) + assert np.allclose(read.vectors, mesh_.vectors) def test_binary_file(): diff --git a/tests/test_commandline.py b/tests/test_commandline.py index 42a2f97..3b9f332 100644 --- a/tests/test_commandline.py +++ b/tests/test_commandline.py @@ -1,3 +1,4 @@ +import contextlib import sys from stl import main @@ -12,13 +13,13 @@ def test_main(ascii_file, binary_file, tmpdir, speedups): args_pre.append('-s') try: - sys.argv[:] = args_pre + [ascii_file] + args_post + sys.argv[:] = [*args_pre, ascii_file, *args_post] main.main() - sys.argv[:] = args_pre + ['-r', ascii_file] + 
args_post + sys.argv[:] = [*args_pre, '-r', ascii_file, *args_post] main.main() - sys.argv[:] = args_pre + ['-a', binary_file] + args_post + sys.argv[:] = [*args_pre, '-a', binary_file, *args_post] main.main() - sys.argv[:] = args_pre + ['-b', ascii_file] + args_post + sys.argv[:] = [*args_pre, '-b', ascii_file, *args_post] main.main() finally: sys.argv[:] = original_argv @@ -27,8 +28,8 @@ def test_main(ascii_file, binary_file, tmpdir, speedups): def test_args(ascii_file, tmpdir): parser = main._get_parser('') - def _get_name(*args): - return main._get_name(parser.parse_args(list(map(str, args)))) + def _get_name(*args) -> str: + return str(main._get_name(parser.parse_args(list(map(str, args))))) assert _get_name('--name', 'foobar') == 'foobar' assert _get_name('-', tmpdir.join('binary.stl')).endswith('binary.stl') @@ -45,10 +46,8 @@ def test_ascii(binary_file, tmpdir, speedups): binary_file, str(tmpdir.join('ascii.stl')), ] - try: + with contextlib.suppress(SystemExit): main.to_ascii() - except SystemExit: - pass finally: sys.argv[:] = original_argv @@ -62,9 +61,7 @@ def test_binary(ascii_file, tmpdir, speedups): ascii_file, str(tmpdir.join('binary.stl')), ] - try: + with contextlib.suppress(SystemExit): main.to_binary() - except SystemExit: - pass finally: sys.argv[:] = original_argv diff --git a/tests/test_convert.py b/tests/test_convert.py index 2d0d1ad..b508330 100644 --- a/tests/test_convert.py +++ b/tests/test_convert.py @@ -1,7 +1,8 @@ -import py.path -import pytest import tempfile +import py.path # type: ignore[import] +import pytest + from stl import stl @@ -36,13 +37,15 @@ def _test_conversion(from_, to, mode, speedups): def test_ascii_to_binary(ascii_path, binary_path, speedups): - _test_conversion(ascii_path, binary_path, mode=stl.BINARY, - speedups=speedups) + _test_conversion( + ascii_path, binary_path, mode=stl.BINARY, speedups=speedups + ) def test_binary_to_ascii(ascii_path, binary_path, speedups): - _test_conversion(binary_path, ascii_path, 
mode=stl.ASCII, - speedups=speedups) + _test_conversion( + binary_path, ascii_path, mode=stl.ASCII, speedups=speedups + ) def test_stl_mesh(ascii_file, tmpdir, speedups): @@ -50,7 +53,7 @@ def test_stl_mesh(ascii_file, tmpdir, speedups): mesh = stl.StlMesh(ascii_file, speedups=speedups) with pytest.raises(ValueError): - mesh.save(filename=str(tmp_file), mode='test') + mesh.save(filename=str(tmp_file), mode='test') # type: ignore[reportArgumentType] mesh.save(str(tmp_file)) mesh.save(str(tmp_file), update_normals=False) diff --git a/tests/test_line_endings.py b/tests/test_line_endings.py index 8d3c4ec..020bc49 100644 --- a/tests/test_line_endings.py +++ b/tests/test_line_endings.py @@ -1,12 +1,13 @@ import pathlib + import pytest -from stl import mesh +from stl import mesh FILES_PATH = pathlib.Path(__file__).parent / 'stl_tests' @pytest.mark.parametrize('line_ending', ['dos', 'unix']) def test_line_endings(line_ending, speedups): - filename = FILES_PATH / ('%s.stl' % line_ending) + filename = FILES_PATH / (f'{line_ending}.stl') mesh.Mesh.from_file(filename, speedups=speedups) diff --git a/tests/test_mesh.py b/tests/test_mesh.py index c5c93cb..c9f90f5 100644 --- a/tests/test_mesh.py +++ b/tests/test_mesh.py @@ -1,91 +1,66 @@ -import numpy +# type: ignore[reportAttributeAccessIssue] +import numpy as np +from stl.base import BaseMesh, RemoveDuplicates from stl.mesh import Mesh -from stl.base import BaseMesh -from stl.base import RemoveDuplicates from . 
import utils def test_units_1d(): - data = numpy.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 0, 0], - [1, 0, 0], - [2, 0, 0]]) + data = np.zeros(1, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]]) mesh = Mesh(data, remove_empty_areas=False) mesh.update_units() assert mesh.areas == 0 - assert numpy.allclose(mesh.centroids, [[1, 0, 0]]) + assert np.allclose(mesh.centroids, [[1, 0, 0]]) utils.array_equals(mesh.normals, [0, 0, 0]) utils.array_equals(mesh.units, [0, 0, 0]) utils.array_equals(mesh.get_unit_normals(), [0, 0, 0]) def test_units_2d(): - data = numpy.zeros(2, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 0, 0], - [1, 0, 0], - [0, 1, 0]]) - data['vectors'][1] = numpy.array([[1, 0, 0], - [0, 1, 0], - [1, 1, 0]]) + data = np.zeros(2, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]]) + data['vectors'][1] = np.array([[1, 0, 0], [0, 1, 0], [1, 1, 0]]) mesh = Mesh(data, remove_empty_areas=False) mesh.update_units() - assert numpy.allclose(mesh.areas, [0.5, 0.5]) - assert numpy.allclose(mesh.centroids, [ - [1 / 3, 1 / 3, 0], - [2 / 3, 2 / 3, 0]]) - assert numpy.allclose(mesh.normals, [ - [0.0, 0.0, 1.0], - [0.0, 0.0, -1.0]]) - assert numpy.allclose(mesh.units, [[0, 0, 1], [0, 0, -1]]) - assert numpy.allclose(mesh.get_unit_normals(), [ - [0.0, 0.0, 1.0], - [0.0, 0.0, -1.0]]) + assert np.allclose(mesh.areas, [0.5, 0.5]) + assert np.allclose(mesh.centroids, [[1 / 3, 1 / 3, 0], [2 / 3, 2 / 3, 0]]) + assert np.allclose(mesh.normals, [[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]) + assert np.allclose(mesh.units, [[0, 0, 1], [0, 0, -1]]) + assert np.allclose( + mesh.get_unit_normals(), [[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]] + ) def test_units_3d(): - data = numpy.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 0, 0], - [1, 0, 0], - [0, 1, 1.]]) + data = np.zeros(1, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 1.0]]) mesh = 
Mesh(data, remove_empty_areas=False) mesh.update_units() - assert (mesh.areas - 2 ** .5) < 0.0001 - assert numpy.allclose(mesh.centroids, [1 / 3, 1 / 3, 1 / 3]) - assert numpy.allclose(mesh.normals, [0.0, -1.0, 1.0]) - assert numpy.allclose(mesh.units[0], [0.0, -0.70710677, 0.70710677]) - assert numpy.allclose(numpy.linalg.norm(mesh.units, axis=-1), 1) - assert numpy.allclose(mesh.get_unit_normals(), - [0.0, -0.70710677, 0.70710677]) + assert (mesh.areas - 2**0.5) < 0.0001 + assert np.allclose(mesh.centroids, [1 / 3, 1 / 3, 1 / 3]) + assert np.allclose(mesh.normals, [0.0, -1.0, 1.0]) + assert np.allclose(mesh.units[0], [0.0, -0.70710677, 0.70710677]) + assert np.allclose(np.linalg.norm(mesh.units, axis=-1), 1) + assert np.allclose(mesh.get_unit_normals(), [0.0, -0.70710677, 0.70710677]) def test_duplicate_polygons(): - data = numpy.zeros(6, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[1, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][1] = numpy.array([[2, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][2] = numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][3] = numpy.array([[2, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][4] = numpy.array([[1, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][5] = numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) + data = np.zeros(6, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][1] = np.array([[2, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][2] = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][3] = np.array([[2, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][4] = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][5] = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) mesh = Mesh(data) assert mesh.data.size == 6 @@ -108,47 +83,37 @@ def test_duplicate_polygons(): mesh = Mesh(data, remove_duplicate_polygons=True) assert mesh.data.size == 3 - assert numpy.allclose(mesh.vectors[0], numpy.array([[1, 0, 0], - 
[0, 0, 0], - [0, 0, 0]])) - assert numpy.allclose(mesh.vectors[1], numpy.array([[2, 0, 0], - [0, 0, 0], - [0, 0, 0]])) - assert numpy.allclose(mesh.vectors[2], numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]])) + assert np.allclose( + mesh.vectors[0], np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + ) + assert np.allclose( + mesh.vectors[1], np.array([[2, 0, 0], [0, 0, 0], [0, 0, 0]]) + ) + assert np.allclose( + mesh.vectors[2], np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + ) mesh = Mesh(data, remove_duplicate_polygons=RemoveDuplicates.ALL) assert mesh.data.size == 3 - assert numpy.allclose(mesh.vectors[0], numpy.array([[1, 0, 0], - [0, 0, 0], - [0, 0, 0]])) - assert numpy.allclose(mesh.vectors[1], numpy.array([[2, 0, 0], - [0, 0, 0], - [0, 0, 0]])) - assert numpy.allclose(mesh.vectors[2], numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]])) + assert np.allclose( + mesh.vectors[0], np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + ) + assert np.allclose( + mesh.vectors[1], np.array([[2, 0, 0], [0, 0, 0], [0, 0, 0]]) + ) + assert np.allclose( + mesh.vectors[2], np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + ) def test_remove_all_duplicate_polygons(): - data = numpy.zeros(5, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][1] = numpy.array([[1, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][2] = numpy.array([[2, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][3] = numpy.array([[3, 0, 0], - [0, 0, 0], - [0, 0, 0]]) - data['vectors'][4] = numpy.array([[3, 0, 0], - [0, 0, 0], - [0, 0, 0]]) + data = np.zeros(5, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][1] = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][2] = np.array([[2, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][3] = np.array([[3, 0, 0], [0, 0, 0], [0, 0, 0]]) + data['vectors'][4] = np.array([[3, 0, 0], [0, 0, 0], [0, 0, 0]]) mesh = Mesh(data, remove_duplicate_polygons=False) 
assert mesh.data.size == 5 @@ -157,28 +122,22 @@ def test_remove_all_duplicate_polygons(): mesh = Mesh(data, remove_duplicate_polygons=RemoveDuplicates.ALL) assert mesh.data.size == 3 - assert (mesh.vectors[0] == numpy.array([[0, 0, 0], - [0, 0, 0], - [0, 0, 0]])).all() - assert (mesh.vectors[1] == numpy.array([[1, 0, 0], - [0, 0, 0], - [0, 0, 0]])).all() - assert (mesh.vectors[2] == numpy.array([[2, 0, 0], - [0, 0, 0], - [0, 0, 0]])).all() + assert ( + mesh.vectors[0] == np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) + ).all() + assert ( + mesh.vectors[1] == np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + ).all() + assert ( + mesh.vectors[2] == np.array([[2, 0, 0], [0, 0, 0], [0, 0, 0]]) + ).all() def test_empty_areas(): - data = numpy.zeros(3, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 0, 0], - [1, 0, 0], - [0, 1, 0]]) - data['vectors'][1] = numpy.array([[1, 0, 0], - [0, 1, 0], - [1, 0, 0]]) - data['vectors'][2] = numpy.array([[1, 0, 0], - [0, 1, 0], - [1, 0, 0]]) + data = np.zeros(3, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]]) + data['vectors'][1] = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0]]) + data['vectors'][2] = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0]]) mesh = Mesh(data, calculate_normals=False, remove_empty_areas=False) assert mesh.data.size == 3 @@ -186,55 +145,67 @@ def test_empty_areas(): # Test the normals recalculation which also calculates the areas by default mesh.areas[1] = 1 mesh.areas[2] = 2 - assert numpy.allclose(mesh.areas, [[0.5], [1.0], [2.0]]) + assert np.allclose(mesh.areas, [[0.5], [1.0], [2.0]]) mesh.centroids[1] = [1, 2, 3] mesh.centroids[2] = [4, 5, 6] - assert numpy.allclose(mesh.centroids, [[1 / 3, 1 / 3, 0], - [1, 2, 3], - [4, 5, 6]]) + assert np.allclose( + mesh.centroids, [[1 / 3, 1 / 3, 0], [1, 2, 3], [4, 5, 6]] + ) mesh.update_normals(update_areas=False, update_centroids=False) - assert numpy.allclose(mesh.areas, [[0.5], [1.0], [2.0]]) - assert numpy.allclose(mesh.centroids, [[1 
/ 3, 1 / 3, 0], - [1, 2, 3], - [4, 5, 6]]) + assert np.allclose(mesh.areas, [[0.5], [1.0], [2.0]]) + assert np.allclose( + mesh.centroids, [[1 / 3, 1 / 3, 0], [1, 2, 3], [4, 5, 6]] + ) mesh.update_normals(update_areas=True, update_centroids=True) - assert numpy.allclose(mesh.areas, [[0.5], [0.0], [0.0]]) - assert numpy.allclose(mesh.centroids, [[1 / 3, 1 / 3, 0], - [2 / 3, 1 / 3, 0], - [2 / 3, 1 / 3, 0]]) + assert np.allclose(mesh.areas, [[0.5], [0.0], [0.0]]) + assert np.allclose( + mesh.centroids, + [[1 / 3, 1 / 3, 0], [2 / 3, 1 / 3, 0], [2 / 3, 1 / 3, 0]], + ) mesh = Mesh(data, remove_empty_areas=True) assert mesh.data.size == 1 def test_base_mesh(): - data = numpy.zeros(10, dtype=BaseMesh.dtype) + data = np.zeros(10, dtype=BaseMesh.dtype) mesh = BaseMesh(data, remove_empty_areas=False) # Increment vector 0 item 0 mesh.v0[0] += 1 mesh.v1[0] += 2 # Check item 0 (contains v0, v1 and v2) - assert (mesh[0] == numpy.array( - [1., 1., 1., 2., 2., 2., 0., 0., 0.], dtype=numpy.float32) + assert ( + mesh[0] + == np.array( + [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0], dtype=np.float32 + ) ).all() - assert (mesh.vectors[0] == numpy.array([ - [1., 1., 1.], - [2., 2., 2.], - [0., 0., 0.]], dtype=numpy.float32)).all() - assert (mesh.v0[0] == numpy.array([1., 1., 1.], dtype=numpy.float32)).all() - assert (mesh.points[0] == numpy.array( - [1., 1., 1., 2., 2., 2., 0., 0., 0.], dtype=numpy.float32) + assert ( + mesh.vectors[0] + == np.array( + [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [0.0, 0.0, 0.0]], + dtype=np.float32, + ) ).all() + assert (mesh.v0[0] == np.array([1.0, 1.0, 1.0], dtype=np.float32)).all() assert ( - mesh.x[0] == numpy.array([1., 2., 0.], dtype=numpy.float32)).all() + mesh.points[0] + == np.array( + [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 0.0, 0.0, 0.0], dtype=np.float32 + ) + ).all() + assert (mesh.x[0] == np.array([1.0, 2.0, 0.0], dtype=np.float32)).all() mesh[0] = 3 - assert (mesh[0] == numpy.array( - [3., 3., 3., 3., 3., 3., 3., 3., 3.], dtype=numpy.float32) + assert ( + 
mesh[0] + == np.array( + [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], dtype=np.float32 + ) ).all() assert len(mesh) == len(list(mesh)) diff --git a/tests/test_meshProperties.py b/tests/test_mesh_properties.py similarity index 64% rename from tests/test_meshProperties.py rename to tests/test_mesh_properties.py index b2b4729..b0ff31f 100644 --- a/tests/test_meshProperties.py +++ b/tests/test_mesh_properties.py @@ -1,80 +1,95 @@ -import numpy +import numpy as np import pytest from stl import stl - -tolerance = 1e-6 +tolerance = 1e-5 def close(a, b): - return numpy.allclose(a, b, atol=tolerance) + return np.allclose(a, b, atol=tolerance) def test_mass_properties_for_half_donut(binary_ascii_path, speedups): - ''' + """ Checks the results of method get_mass_properties() on STL ASCII and binary files HalfDonut.stl One checks the results obtained with stl with the ones obtained with meshlab - ''' - filename = binary_ascii_path/'HalfDonut.stl' + """ + filename = binary_ascii_path / 'HalfDonut.stl' mesh = stl.StlMesh(str(filename), speedups=speedups) volume, cog, inertia = mesh.get_mass_properties() assert close([volume], [2.343149]) assert close(cog, [1.500001, 0.209472, 1.500001]) - assert close(inertia, [[+1.390429, +0.000000, +0.000000], - [+0.000000, +2.701025, +0.000000], - [+0.000000, +0.000000, +1.390429]]) + assert close( + inertia, + [ + [+1.390429, +0.000000, +0.000000], + [+0.000000, +2.701025, +0.000000], + [+0.000000, +0.000000, +1.390429], + ], + ) def test_mass_properties_for_moon(binary_ascii_path, speedups): - ''' + """ Checks the results of method get_mass_properties() on STL ASCII and binary files Moon.stl One checks the results obtained with stl with the ones obtained with meshlab - ''' - filename = binary_ascii_path/'Moon.stl' + """ + filename = binary_ascii_path / 'Moon.stl' mesh = stl.StlMesh(str(filename), speedups=speedups) volume, cog, inertia = mesh.get_mass_properties() assert close([volume], [0.888723]) assert close(cog, [0.906913, 0.170731, 
1.500001]) - assert close(inertia, [[+0.562097, -0.000457, +0.000000], - [-0.000457, +0.656851, +0.000000], - [+0.000000, +0.000000, +0.112465]]) + assert close( + inertia, + [ + [+0.562097, -0.000457, +0.000000], + [-0.000457, +0.656851, +0.000000], + [+0.000000, +0.000000, +0.112465], + ], + ) @pytest.mark.parametrize('filename', ('Star.stl', 'StarWithEmptyHeader.stl')) def test_mass_properties_for_star(binary_ascii_path, filename, speedups): - ''' + """ Checks the results of method get_mass_properties() on STL ASCII and binary files Star.stl and STL binary file StarWithEmptyHeader.stl (with no header) One checks the results obtained with stl with the ones obtained with meshlab - ''' - filename = binary_ascii_path/filename + """ + filename = binary_ascii_path / filename if not filename.exists(): pytest.skip('STL file does not exist') mesh = stl.StlMesh(str(filename), speedups=speedups) volume, cog, inertia = mesh.get_mass_properties() assert close([volume], [1.416599]) assert close(cog, [1.299040, 0.170197, 1.499999]) - assert close(inertia, [[+0.509549, +0.000000, -0.000000], - [+0.000000, +0.991236, +0.000000], - [-0.000000, +0.000000, +0.509550]]) + assert close( + inertia, + [ + [+0.509549, +0.000000, -0.000000], + [+0.000000, +0.991236, +0.000000], + [-0.000000, +0.000000, +0.509550], + ], + ) def test_mass_properties_for_half_donut_with_density( - binary_ascii_path, speedups): - ''' + binary_ascii_path, speedups +): + """ Checks the results of method get_mass_properties_with_density() on STL ASCII and binary files HalfDonut.stl One checks the results obtained with stl with the ones obtained with meshlab - ''' - filename = binary_ascii_path/'HalfDonut.stl' + """ + filename = binary_ascii_path / 'HalfDonut.stl' mesh = stl.StlMesh(str(filename), speedups=speedups) volume, mass, cog, inertia = mesh.get_mass_properties_with_density(1.23) @@ -82,8 +97,13 @@ def test_mass_properties_for_half_donut_with_density( assert close([volume], [2.343149026234945]) assert 
close(cog, [1.500001, 0.209472, 1.500001]) print('inertia') - numpy.set_printoptions(suppress=True) + np.set_printoptions(suppress=True) print(inertia) - assert close(inertia, [[+1.71022851, +0.00000001, -0.00000011], - [+0.00000001, +3.32226227, +0.00000002], - [-0.00000011, +0.00000002, +1.71022859]]) + assert close( + inertia, + [ + [+1.71022851, +0.00000001, -0.00000011], + [+0.00000001, +3.32226227, +0.00000002], + [-0.00000011, +0.00000002, +1.71022859], + ], + ) diff --git a/tests/test_multiple.py b/tests/test_multiple.py index 6991544..5f6c5d9 100644 --- a/tests/test_multiple.py +++ b/tests/test_multiple.py @@ -4,7 +4,7 @@ from stl import mesh -_STL_FILE = b''' +_STL_FILE = b""" solid test.stl facet normal -0.014565 0.073223 -0.002897 outer loop @@ -14,7 +14,7 @@ endloop endfacet endsolid test.stl -''' +""" def test_single_stl(tmpdir, speedups): @@ -34,10 +34,9 @@ def test_multiple_stl(tmpdir, speedups): for _ in range(10): fh.write(_STL_FILE) fh.seek(0) + i = 0 for i, m in enumerate( - mesh.Mesh.from_multi_file( - str(tmp_file), fh=fh, speedups=speedups - ) + mesh.Mesh.from_multi_file(str(tmp_file), fh=fh, speedups=speedups) ): assert m.name == b'test.stl' @@ -49,9 +48,7 @@ def test_single_stl_file(tmpdir, speedups): with tmp_file.open('wb+') as fh: fh.write(_STL_FILE) fh.seek(0) - for m in mesh.Mesh.from_multi_file( - str(tmp_file), speedups=speedups - ): + for m in mesh.Mesh.from_multi_file(str(tmp_file), speedups=speedups): pass @@ -62,10 +59,9 @@ def test_multiple_stl_file(tmpdir, speedups): fh.write(_STL_FILE) fh.seek(0) + i = -1 for i, m in enumerate( - mesh.Mesh.from_multi_file( - str(tmp_file), speedups=speedups - ) + mesh.Mesh.from_multi_file(str(tmp_file), speedups=speedups) ): assert m.name == b'test.stl' diff --git a/tests/test_rotate.py b/tests/test_rotate.py index 5275fad..993d20b 100644 --- a/tests/test_rotate.py +++ b/tests/test_rotate.py @@ -1,5 +1,7 @@ +# type: ignore[reportAttributeAccessIssue] import math -import numpy + +import numpy 
as np import pytest from stl.mesh import Mesh @@ -9,35 +11,23 @@ def test_rotation(): # Create 6 faces of a cube - data = numpy.zeros(6, dtype=Mesh.dtype) + data = np.zeros(6, dtype=Mesh.dtype) # Top of the cube - data['vectors'][0] = numpy.array([[0, 1, 1], - [1, 0, 1], - [0, 0, 1]]) - data['vectors'][1] = numpy.array([[1, 0, 1], - [0, 1, 1], - [1, 1, 1]]) + data['vectors'][0] = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]]) + data['vectors'][1] = np.array([[1, 0, 1], [0, 1, 1], [1, 1, 1]]) # Right face - data['vectors'][2] = numpy.array([[1, 0, 0], - [1, 0, 1], - [1, 1, 0]]) - data['vectors'][3] = numpy.array([[1, 1, 1], - [1, 0, 1], - [1, 1, 0]]) + data['vectors'][2] = np.array([[1, 0, 0], [1, 0, 1], [1, 1, 0]]) + data['vectors'][3] = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 0]]) # Left face - data['vectors'][4] = numpy.array([[0, 0, 0], - [1, 0, 0], - [1, 0, 1]]) - data['vectors'][5] = numpy.array([[0, 0, 0], - [0, 0, 1], - [1, 0, 1]]) + data['vectors'][4] = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 1]]) + data['vectors'][5] = np.array([[0, 0, 0], [0, 0, 1], [1, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) # Since the cube faces are from 0 to 1 we can move it to the middle by # substracting .5 - data['vectors'] -= .5 + data['vectors'] -= 0.5 # Rotate 90 degrees over the X axis followed by the Y axis followed by the # X axis @@ -47,50 +37,50 @@ def test_rotation(): # Since the cube faces are from 0 to 1 we can move it to the middle by # substracting .5 - data['vectors'] += .5 + data['vectors'] += 0.5 # We use a slightly higher absolute tolerance here, for ppc64le # https://github.com/WoLpH/numpy-stl/issues/78 - assert numpy.allclose(mesh.vectors, numpy.array([ - [[1, 0, 0], [0, 1, 0], [0, 0, 0]], - [[0, 1, 0], [1, 0, 0], [1, 1, 0]], - [[0, 1, 1], [0, 1, 0], [1, 1, 1]], - [[1, 1, 0], [0, 1, 0], [1, 1, 1]], - [[0, 0, 1], [0, 1, 1], [0, 1, 0]], - [[0, 0, 1], [0, 0, 0], [0, 1, 0]], - ]), atol=1e-07) + assert np.allclose( + mesh.vectors, + np.array( + [ + [[1, 0, 0], 
[0, 1, 0], [0, 0, 0]], + [[0, 1, 0], [1, 0, 0], [1, 1, 0]], + [[0, 1, 1], [0, 1, 0], [1, 1, 1]], + [[1, 1, 0], [0, 1, 0], [1, 1, 1]], + [[0, 0, 1], [0, 1, 1], [0, 1, 0]], + [[0, 0, 1], [0, 0, 0], [0, 1, 0]], + ] + ), + atol=1e-07, + ) def test_rotation_over_point(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) + data = np.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) + data['vectors'][0] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) mesh.rotate([1, 0, 0], math.radians(180), point=[1, 2, 3]) utils.array_equals( mesh.vectors, - numpy.array([[[1., 4., 6.], - [0., 3., 6.], - [0., 4., 5.]]])) + np.array([[[1.0, 4.0, 6.0], [0.0, 3.0, 6.0], [0.0, 4.0, 5.0]]]), + ) mesh.rotate([1, 0, 0], math.radians(-180), point=[1, 2, 3]) utils.array_equals( - mesh.vectors, - numpy.array([[[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]])) + mesh.vectors, np.array([[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]) + ) mesh.rotate([1, 0, 0], math.radians(180), point=0.0) utils.array_equals( mesh.vectors, - numpy.array([[[1., 0., -0.], - [0., -1., -0.], - [0., 0., -1.]]])) + np.array([[[1.0, 0.0, -0.0], [0.0, -1.0, -0.0], [0.0, 0.0, -1.0]]]), + ) with pytest.raises(TypeError): mesh.rotate([1, 0, 0], math.radians(180), point='x') @@ -98,114 +88,111 @@ def test_rotation_over_point(): def test_double_rotation(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) + data = np.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[1, 0, 0], - [0, 1, 0], - [0, 0, 1]]) + data['vectors'][0] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) rotation_matrix = mesh.rotation_matrix([1, 0, 0], math.radians(180)) - combined_rotation_matrix = numpy.dot(rotation_matrix, rotation_matrix) + combined_rotation_matrix = np.dot(rotation_matrix, rotation_matrix) mesh.rotate_using_matrix(combined_rotation_matrix) utils.array_equals( mesh.vectors, - 
numpy.array([[[1., 0., 0.], - [0., 1., 0.], - [0., 0., 1.]]])) + np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]]), + ) def test_no_rotation(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) + data = np.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 1, 1], - [1, 0, 1], - [0, 0, 1]]) + data['vectors'][0] = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) # Rotate by 0 degrees mesh.rotate([0.5, 0.0, 0.0], math.radians(0)) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) # Use a zero rotation matrix mesh.rotate([0.0, 0.0, 0.0], math.radians(90)) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) def test_no_translation(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 1, 1], - [1, 0, 1], - [0, 0, 1]]) + data = np.zeros(1, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) # Translate mesh with a zero vector mesh.translate([0.0, 0.0, 0.0]) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) def test_translation(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 1, 1], - [1, 0, 1], - [0, 0, 1]]) + data = np.zeros(1, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]]) mesh = Mesh(data, 
remove_empty_areas=False) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) # Translate mesh with vector [1, 2, 3] mesh.translate([1.0, 2.0, 3.0]) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[1, 3, 4], [2, 2, 4], [1, 2, 4]]])) + assert np.allclose( + mesh.vectors, np.array([[[1, 3, 4], [2, 2, 4], [1, 2, 4]]]) + ) def test_no_transformation(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 1, 1], - [1, 0, 1], - [0, 0, 1]]) + data = np.zeros(1, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) # Transform mesh with identity matrix - mesh.transform(numpy.eye(4)) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) - assert numpy.allclose(mesh.areas, 0.5) + mesh.transform(np.eye(4)) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) + assert np.allclose(mesh.areas, 0.5) def test_transformation(): # Create a single face - data = numpy.zeros(1, dtype=Mesh.dtype) - data['vectors'][0] = numpy.array([[0, 1, 1], - [1, 0, 1], - [0, 0, 1]]) + data = np.zeros(1, dtype=Mesh.dtype) + data['vectors'][0] = np.array([[0, 1, 1], [1, 0, 1], [0, 0, 1]]) mesh = Mesh(data, remove_empty_areas=False) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 1, 1], [1, 0, 1], [0, 0, 1]]])) + assert np.allclose( + mesh.vectors, np.array([[[0, 1, 1], [1, 0, 1], [0, 0, 1]]]) + ) # Transform mesh with identity matrix - tr = numpy.zeros((4, 4)) - tr[0:3, 0:3] = Mesh.rotation_matrix([0, 0, 1], 0.5 * numpy.pi) + tr = np.zeros((4, 4)) + tr[0:3, 0:3] = Mesh.rotation_matrix([0, 
0, 1], 0.5 * np.pi) tr[0:3, 3] = [1, 2, 3] mesh.transform(tr) - assert numpy.allclose(mesh.vectors, numpy.array([ - [[0, 2, 4], [1, 3, 4], [1, 2, 4]]])) - assert numpy.allclose(mesh.areas, 0.5) + assert np.allclose( + mesh.vectors, np.array([[[0, 2, 4], [1, 3, 4], [1, 2, 4]]]) + ) + assert np.allclose(mesh.areas, 0.5) diff --git a/tests/tmp/test_args_False_0/binary.stl b/tests/tmp/test_args_False_0/binary.stl new file mode 100644 index 0000000..e69de29 diff --git a/tests/tmp/test_args_False_current b/tests/tmp/test_args_False_current new file mode 120000 index 0000000..eedf3da --- /dev/null +++ b/tests/tmp/test_args_False_current @@ -0,0 +1 @@ +/Volumes/workspace/numpy-stl/tests/tmp/test_args_False_0 \ No newline at end of file diff --git a/tests/tmp/test_args_True_0/binary.stl b/tests/tmp/test_args_True_0/binary.stl new file mode 100644 index 0000000..e69de29 diff --git a/tests/tmp/test_args_True_current b/tests/tmp/test_args_True_current new file mode 120000 index 0000000..ecb22d9 --- /dev/null +++ b/tests/tmp/test_args_True_current @@ -0,0 +1 @@ +/Volumes/workspace/numpy-stl/tests/tmp/test_args_True_0 \ No newline at end of file diff --git a/tests/utils.py b/tests/utils.py index 1f7b919..4f62d13 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,11 +1,11 @@ -import numpy +import numpy as np def to_array(array, round): __tracebackhide__ = True - if not isinstance(array, numpy.ndarray): - array = numpy.array(array) + if not isinstance(array, np.ndarray): + array = np.array(array) if round: array = array.round(round) @@ -18,7 +18,7 @@ def array_equals(left, right, round=6): left = to_array(left, round) right = to_array(right, round) - message = 'Arrays are unequal:\n%s\n%s' % (left, right) + message = f'Arrays are unequal:\n{left}\n{right}' if left.size == right.size: message += '\nDifference:\n%s' % (left - right) diff --git a/tox.ini b/tox.ini index a5eb822..e841a32 100644 --- a/tox.ini +++ b/tox.ini @@ -1,27 +1,33 @@ [tox] -envlist = py36, py37, py38, py39, 
py310, pypy3, flake8, docs +env_list = ruff, black, pypy3, py3{9,10,11,12,13}-numpy{1,2}, docs, mypy, pyright skip_missing_interpreters = True [testenv] -deps = -rtests/requirements.txt -commands = +deps = + numpy1: numpy==1.* + numpy2: numpy==2.* + -rtests/requirements.txt +commands = python -m pip install -U pip wheel setuptools python setup.py build_ext --inplace python -m pytest -vvv {posargs} basepython = - py36: python3.6 - py37: python3.7 - py38: python3.8 py39: python3.9 py310: python3.10 + py311: python3.11 + py312: python3.12 + py313: python3.13 pypy3: pypy3 [gh-actions] python = - 3.7: py37 - 3.8: py38 3.9: py39 3.10: py310 + 3.11: py311 + 3.12: py312 + 3.13: py313 + pypy3: pypy3 + [testenv:flake8] basepython=python