diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 064dd93d8..d129ef26f 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -17,6 +17,10 @@ on: branches: - main +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + jobs: rngs: # To achieve consistent coverage, we need a little bit of correlated collaboration. @@ -248,8 +252,7 @@ jobs: fi echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" - # TODO: remove `-c numba` when numba 0.57 is properly released on conda-forge - $(command -v mamba || command -v conda) install -c numba packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 10fcca649..4588ed4f4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,10 +5,12 @@ # To update: `pre-commit autoupdate` # - &flake8_dependencies below needs updated manually ci: - # See: https://pre-commit.ci/#configuration - autofix_prs: false - autoupdate_schedule: monthly - skip: [pylint, no-commit-to-branch] + # See: https://pre-commit.ci/#configuration + autofix_prs: false + autoupdate_schedule: monthly + autoupdate_commit_msg: "chore: update pre-commit hooks" + autofix_commit_msg: "style: pre-commit fixes" + skip: [pylint, no-commit-to-branch] fail_fast: true default_language_version: python: python3 @@ -17,15 +19,21 @@ repos: rev: v4.4.0 hooks: - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict 
+ - id: check-symlinks - id: check-ast - id: check-toml - id: check-yaml - id: debug-statements - id: end-of-file-fixer + exclude_types: [svg] - id: mixed-line-ending - id: trailing-whitespace + - id: name-tests-test + args: ["--pytest-test-first"] - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.2 + rev: v0.13 hooks: - id: validate-pyproject name: Validate pyproject.toml @@ -58,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -86,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint @@ -101,6 +109,10 @@ repos: hooks: - id: pyroma args: [-n, "10", .] + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: "v0.9.0.2" + hooks: + - id: shellcheck - repo: local hooks: # Add `--hook-stage manual` to pre-commit command to run (very slow) @@ -137,4 +149,4 @@ repos: # hooks: # - id: bandit # -# blacken-docs, blackdoc mypy, pydocstringformatter, velin, flynt, yamllint +# blacken-docs, blackdoc prettier, mypy, pydocstringformatter, velin, flynt, yamllint diff --git a/README.md b/README.md index 570a82de5..13067df6e 100644 --- a/README.md +++ b/README.md @@ -176,8 +176,9 @@ use as well as the blocking/non-blocking mode. If the context is not initialized be performed automatically. 
```python import graphblas as gb + # Context initialization must happen before any other imports -gb.init('suitesparse', blocking=True) +gb.init("suitesparse", blocking=True) # Now we can import other items from graphblas from graphblas import binary, semiring @@ -195,7 +196,7 @@ def force_odd_func(x): return x + 1 return x -unary.register_new('force_odd', force_odd_func) +unary.register_new("force_odd", force_odd_func) v = Vector.from_coo([0, 1, 3], [1, 2, 3]) w = v.apply(unary.force_odd).new() @@ -210,7 +211,7 @@ import graphblas as gb # scipy.sparse matrices A = gb.io.from_scipy_sparse(m) -m = gb.io.to_scipy_sparse(m, format='csr') +m = gb.io.to_scipy_sparse(m, format="csr") # networkx graphs A = gb.io.from_networkx(g) diff --git a/docs/getting_started/primer.rst b/docs/getting_started/primer.rst index 710dca702..104eb5738 100644 --- a/docs/getting_started/primer.rst +++ b/docs/getting_started/primer.rst @@ -89,26 +89,13 @@ makes for faster graph algorithms. # networkx-style storage of an undirected graph G = { - 0: {1: {'weight': 5.6}, - 2: {'weight': 2.3}, - 3: {'weight': 4.6}}, - 1: {0: {'weight': 5.6}, - 2: {'weight': 1.9}, - 3: {'weight': 6.2}}, - 2: {0: {'weight': 2.3}, - 1: {'weight': 1.9}, - 3: {'weight': 3.0}}, - 3: {0: {'weight': 4.6}, - 1: {'weight': 6.2}, - 2: {'weight': 3.0}, - 4: {'weight': 1.4}}, - 4: {3: {'weight': 1.4}, - 5: {'weight': 4.4}, - 6: {'weight': 1.0}}, - 5: {4: {'weight': 4.4}, - 6: {'weight': 2.8}}, - 6: {4: {'weight': 1.0}, - 5: {'weight': 2.8}} + 0: {1: {"weight": 5.6}, 2: {"weight": 2.3}, 3: {"weight": 4.6}}, + 1: {0: {"weight": 5.6}, 2: {"weight": 1.9}, 3: {"weight": 6.2}}, + 2: {0: {"weight": 2.3}, 1: {"weight": 1.9}, 3: {"weight": 3.0}}, + 3: {0: {"weight": 4.6}, 1: {"weight": 6.2}, 2: {"weight": 3.0}, 4: {"weight": 1.4}}, + 4: {3: {"weight": 1.4}, 5: {"weight": 4.4}, 6: {"weight": 1.0}}, + 5: {4: {"weight": 4.4}, 6: {"weight": 2.8}}, + 6: {4: {"weight": 1.0}, 5: {"weight": 2.8}}, } An alternative way to store a graph is as an 
adjacency matrix. Each node becomes both a row @@ -240,7 +227,9 @@ node 0. [0, 0, 1, 1, 2], [1, 2, 2, 3, 3], [2.0, 5.0, 1.5, 4.25, 0.5], - nrows=4, ncols=4) + nrows=4, + ncols=4 + ) v = Vector.from_coo([start_node], [0.0], size=4) # Compute SSSP diff --git a/docs/user_guide/init.rst b/docs/user_guide/init.rst index 62f81b50f..ffb6a3463 100644 --- a/docs/user_guide/init.rst +++ b/docs/user_guide/init.rst @@ -8,8 +8,9 @@ GraphBLAS must be initialized before it can be used. This is done with the .. code-block:: python import graphblas as gb + # Context initialization must happen before any other imports - gb.init('suitesparse', blocking=False) + gb.init("suitesparse", blocking=False) # Now we can import other items from graphblas from graphblas import binary, semiring diff --git a/docs/user_guide/operations.rst b/docs/user_guide/operations.rst index 9ee76ab4c..ede2efb06 100644 --- a/docs/user_guide/operations.rst +++ b/docs/user_guide/operations.rst @@ -26,14 +26,22 @@ a Vector is treated as an nx1 column matrix. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 2, 3, 3], - [2., 5., 1.5, 4.25, 0.5], nrows=4, ncols=4) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2, 3, 3], [1, 2, 0, 1, 1, 2, 0, 1], - [3., 2., 9., 6., 3., 1., 0., 5.]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2], + [1, 2, 2, 3, 3], + [2., 5., 1.5, 4.25, 0.5], + nrows=4, + ncols=4 + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2, 3, 3], + [1, 2, 0, 1, 1, 2, 0, 1], + [3., 2., 9., 6., 3., 1., 0., 5.] + ) C = gb.Matrix(float, A.nrows, B.ncols) # These are equivalent - C << A.mxm(B, op='min_plus') # method style + C << A.mxm(B, op="min_plus") # method style C << gb.semiring.min_plus(A @ B) # functional style .. csv-table:: A @@ -67,13 +75,18 @@ a Vector is treated as an nx1 column matrix. .. 
code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 2, 3, 3], - [2., 5., 1.5, 4.25, 0.5], nrows=4, ncols=4) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2], + [1, 2, 2, 3, 3], + [2., 5., 1.5, 4.25, 0.5], + nrows=4, + ncols=4 + ) v = gb.Vector.from_coo([0, 1, 3], [10., 20., 40.]) w = gb.Vector(float, A.nrows) # These are equivalent - w << A.mxv(v, op='plus_times') # method style + w << A.mxv(v, op="plus_times") # method style w << gb.semiring.plus_times(A @ v) # functional style .. csv-table:: A @@ -102,12 +115,15 @@ a Vector is treated as an nx1 column matrix. .. code-block:: python v = gb.Vector.from_coo([0, 1, 3], [10., 20., 40.]) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2, 3, 3], [1, 2, 0, 1, 1, 2, 0, 1], - [3., 2., 9., 6., 3., 1., 0., 5.]) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2, 3, 3], + [1, 2, 0, 1, 1, 2, 0, 1], + [3., 2., 9., 6., 3., 1., 0., 5.] + ) u = gb.Vector(float, B.ncols) # These are equivalent - u << v.vxm(B, op='plus_plus') # method style + u << v.vxm(B, op="plus_plus") # method style u << gb.semiring.plus_plus(v @ B) # functional style .. csv-table:: v @@ -148,14 +164,20 @@ Example usage: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2], [1, 2, 0, 2, 1], - [2.0, 5.0, 1.5, 4.0, 0.5]) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 1, 2], - [3., -2., 0., 6., 3., 1.]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2], + [1, 2, 0, 2, 1], + [2., 5., 1.5, 4., 0.5] + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 1, 2], + [3., -2., 0., 6., 3., 1.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.ewise_mult(B, op='min') # method style + C << A.ewise_mult(B, op="min") # method style C << gb.binary.min(A & B) # functional style .. csv-table:: A @@ -225,14 +247,21 @@ should be used with the functional syntax, ``left_default`` and ``right_default` .. 
code-block:: python - A = gb.Matrix.from_coo([0, 0, 0, 1, 1], [0, 1, 2, 0, 2], - [9.0, 2.0, 5.0, 1.5, 4.0], nrows=3) - B = gb.Matrix.from_coo([0, 0, 0, 2, 2, 2], [0, 1, 2, 0, 1, 2], - [4., 0., -2., 6., 3., 1.]) + A = gb.Matrix.from_coo( + [0, 0, 0, 1, 1], + [0, 1, 2, 0, 2], + [9., 2., 5., 1.5, 4.], + nrows=3 + ) + B = gb.Matrix.from_coo( + [0, 0, 0, 2, 2, 2], + [0, 1, 2, 0, 1, 2], + [4., 0., -2., 6., 3., 1.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.ewise_add(B, op='minus') # method style + C << A.ewise_add(B, op="minus") # method style C << gb.binary.minus(A | B) # functional style .. csv-table:: A @@ -263,14 +292,21 @@ should be used with the functional syntax, ``left_default`` and ``right_default` .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 0, 1, 1], [0, 1, 2, 0, 2], - [9.0, 2.0, 5.0, 1.5, 4.0], nrows=3) - B = gb.Matrix.from_coo([0, 0, 0, 2, 2, 2], [0, 1, 2, 0, 1, 2], - [4., 0., -2., 6., 3., 1.]) + A = gb.Matrix.from_coo( + [0, 0, 0, 1, 1], + [0, 1, 2, 0, 2], + [9., 2., 5., 1.5, 4.], + nrows=3 + ) + B = gb.Matrix.from_coo( + [0, 0, 0, 2, 2, 2], + [0, 1, 2, 0, 1, 2], + [4., 0., -2., 6., 3., 1.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.ewise_union(B, op='minus', left_default=0, right_default=0) # method style + C << A.ewise_union(B, op="minus", left_default=0, right_default=0) # method style C << gb.binary.minus(A | B, left_default=0, right_default=0) # functional style .. csv-table:: A @@ -341,8 +377,11 @@ Matrix List Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, 2, A.ncols) C << A[[0, 2], :] @@ -382,11 +421,16 @@ Matrix-Matrix Assignment Example: .. 
code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) - B = gb.Matrix.from_coo([0, 0, 1, 1], [0, 1, 0, 1], - [-99., -98., -97., -96.]) - + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1], + [0, 1, 0, 1], + [-99., -98., -97., -96.] + ) A[::2, ::2] << B .. csv-table:: A @@ -416,8 +460,11 @@ Matrix-Vector Assignment Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) v = gb.Vector.from_coo([2], [-99.]) A[1, :] << v @@ -530,7 +577,7 @@ function with the collection as the argument. w = gb.Vector(float, v.size) # These are all equivalent - w << v.apply('minus', right=15) + w << v.apply("minus", right=15) w << gb.binary.minus(v, right=15) w << v - 15 @@ -557,12 +604,15 @@ Upper Triangle Example: .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 1, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 2, 1, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, A.nrows, A.ncols) # These are equivalent - C << A.select('triu') + C << A.select("triu") C << gb.select.triu(A) .. csv-table:: A @@ -589,7 +639,7 @@ Select by Value Example: w = gb.Vector(float, v.size) # These are equivalent - w << v.select('>=', 5) + w << v.select(">=", 5) w << gb.select.value(v >= 5) .. csv-table:: v @@ -618,11 +668,14 @@ A monoid or aggregator is used to perform the reduction. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 1], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 3, 0, 1, 0, 1], + [2., 5., 1.5, 4., 0.5, -7.] 
+ ) w = gb.Vector(float, A.ncols) - w << A.reduce_columnwise('times') + w << A.reduce_columnwise("times") .. csv-table:: A :class: inline @@ -642,11 +695,14 @@ A monoid or aggregator is used to perform the reduction. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 1], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 3, 0, 1, 0, 1], + [2., 5., 1.5, 4., 0.5, -7.] + ) s = gb.Scalar(float) - s << A.reduce_scalar('max') + s << A.reduce_scalar("max") .. csv-table:: A :class: inline @@ -670,7 +726,7 @@ A monoid or aggregator is used to perform the reduction. s = gb.Scalar(int) # These are equivalent - s << v.reduce('argmin') + s << v.reduce("argmin") s << gb.agg.argmin(v) .. csv-table:: v @@ -695,8 +751,11 @@ To force the transpose to be computed by itself, use it by itself as the right-h .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 3, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 3, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, A.ncols, A.nrows) C << A.T @@ -728,12 +787,19 @@ The Kronecker product uses a binary operator. .. code-block:: python - A = gb.Matrix.from_coo([0, 0, 1], [0, 1, 0], [1., -2., 3.]) - B = gb.Matrix.from_coo([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 0, 2], - [2.0, 5.0, 1.5, 4.0, 0.5, -7.0]) + A = gb.Matrix.from_coo( + [0, 0, 1], + [0, 1, 0], + [1., -2., 3.] + ) + B = gb.Matrix.from_coo( + [0, 0, 1, 1, 2, 2], + [1, 2, 0, 1, 0, 2], + [2., 5., 1.5, 4., 0.5, -7.] + ) C = gb.Matrix(float, A.nrows * B.nrows, A.ncols * B.ncols) - C << A.kronecker(B, 'times') + C << A.kronecker(B, "times") .. 
csv-table:: A :class: inline diff --git a/docs/user_guide/operators.rst b/docs/user_guide/operators.rst index 84fe9312c..9499562f2 100644 --- a/docs/user_guide/operators.rst +++ b/docs/user_guide/operators.rst @@ -273,7 +273,7 @@ Example usage: minval = v.reduce(gb.monoid.min).value # This will force the FP32 version of min to be used, possibly type casting the elements - minvalFP32 = v.reduce(gb.monoid.min['FP32']).value + minvalFP32 = v.reduce(gb.monoid.min["FP32"]).value The gb.op Namespace @@ -431,7 +431,7 @@ the power of y for overlapping elements. .. code-block:: python - v ** w + v**w .. csv-table:: :header: 0,1,2,3,4,5 diff --git a/docs/user_guide/recorder.rst b/docs/user_guide/recorder.rst index ee6d2bbb9..3355d93ce 100644 --- a/docs/user_guide/recorder.rst +++ b/docs/user_guide/recorder.rst @@ -25,7 +25,9 @@ Instead, only the calls from the last iteration will be returned. [0, 0, 1, 1, 2], [1, 2, 2, 3, 3], [2.0, 5.0, 1.5, 4.25, 0.5], - nrows=4, ncols=4) + nrows=4, + ncols=4 + ) v = Vector.from_coo([start_node], [0.0], size=4) # Compute SSSP diff --git a/docs/user_guide/udf.rst b/docs/user_guide/udf.rst index 6c72535fc..b96097a85 100644 --- a/docs/user_guide/udf.rst +++ b/docs/user_guide/udf.rst @@ -21,7 +21,7 @@ Example user-defined UnaryOp: return x + 1 return x - unary.register_new('force_odd', force_odd_func) + unary.register_new("force_odd", force_odd_func) v = Vector.from_coo([0, 1, 3, 4, 5], [1, 2, 3, 8, 14]) w = v.apply(unary.force_odd).new() diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index b74ca347a..2542ad00e 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -457,7 +457,7 @@ def to_values(self, dtype=None, *, rows=True, columns=True, values=True, sort=Tr Requested dtype for the output values array. 
rows : bool, default=True Whether to return rows; will return ``None`` for rows if ``False`` - columns :bool, default=True + columns : bool, default=True Whether to return columns; will return ``None`` for columns if ``False`` values : bool, default=True Whether to return values; will return ``None`` for values if ``False`` diff --git a/graphblas/viz.py b/graphblas/viz.py index fafeae5f0..f0367e119 100644 --- a/graphblas/viz.py +++ b/graphblas/viz.py @@ -35,8 +35,7 @@ def _get_imports(names, within): except ImportError: modname = _LAZY_IMPORTS[name].split(".")[0] raise ImportError(f"`{within}` requires {modname} to be installed") from None - finally: - globals()[name] = val + globals()[name] = val rv.append(val) if is_string: return rv[0] diff --git a/pyproject.toml b/pyproject.toml index 245dc35bd..9d635c778 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -168,11 +168,18 @@ known_first_party = "graphblas" line_length = 100 [tool.pytest.ini_options] +minversion = "6.0" testpaths = "graphblas/tests" xfail_strict = true +addopts = [ + "--strict-config", # Force error if config is misspelled + "--strict-markers", # Force error if marker is misspelled (must be defined in config) + "-ra", # Print summary of all fails/errors +] markers = [ "slow: Skipped unless --runslow passed", ] +log_cli_level = "info" filterwarnings = [ # See: https://docs.python.org/3/library/warnings.html#describing-warning-filters # and: https://docs.pytest.org/en/7.2.x/how-to/capture-warnings.html#controlling-warnings @@ -342,6 +349,7 @@ ignore = [ "TID", # flake8-tidy-imports (Rely on isort and our own judgement) "TCH", # flake8-type-checking (Note: figure out type checking later) "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) + "TD", # flake8-todos (Maybe okay to add some of these) "ERA", # eradicate (We like code in comments!) "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ]