diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 6126bcd..f54bcd3 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -33,7 +33,7 @@ jobs:
         pip install tox
     - name: Run format, sort, lints and types
       run: |
-        tox -e format,sort,lints,types
+        tox -e format,lints,types
 
   test:
     name: unit tests
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bb892e7..b9cfdf4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,26 +13,6 @@ repos:
   - id: requirements-txt-fixer
   - id: trailing-whitespace
 
-- repo: https://github.com/asottile/pyupgrade
-  rev: v2.29.1
-  hooks:
-  - id: pyupgrade
-    args: [--py38-plus]
-
-- repo: https://github.com/psf/black
-  rev: 22.3.0
-  hooks:
-  - id: black
-    args: ["--config=pyproject.toml"]
-    files: "(ramsey|examples)"
-
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-  - id: isort
-    args: ["--settings-path=pyproject.toml"]
-    files: "(ramsey|examples)"
-
 - repo: https://github.com/pycqa/bandit
   rev: 1.7.1
   hooks:
@@ -44,15 +24,6 @@ repos:
     additional_dependencies: ["toml"]
     files: "(ramsey|examples)"
 
-- repo: https://github.com/PyCQA/flake8
-  rev: 5.0.1
-  hooks:
-  - id: flake8
-    additional_dependencies: [
-        flake8-typing-imports==1.14.0,
-        flake8-pyproject==1.1.0.post0
-    ]
-
 - repo: https://github.com/pre-commit/mirrors-mypy
   rev: v0.910-1
   hooks:
@@ -77,8 +48,9 @@ repos:
   - id: gitlint
   - id: gitlint-ci
 
-- repo: https://github.com/pycqa/pydocstyle
-  rev: 6.1.1
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: v0.3.0
   hooks:
-  - id: pydocstyle
-    additional_dependencies: ["toml"]
+  - id: ruff
+    args: [ --fix ]
+  - id: ruff-format
diff --git a/pyproject.toml b/pyproject.toml
index 068273e..752d238 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,54 +1,19 @@
 [build-system]
 requires = ["setuptools", "wheel"]
 
-[tool.black]
-line-length = 80
-target-version = ['py38']
-exclude = '''
-/(
-    \.eggs
-  | \.git
-  | \.hg
-  | \.mypy_cache
-  | \.tox
-  | \.venv
-  | _build
-  | buck-out
-  | build
-  | dist
-)/
-'''
-
-[tool.isort]
-profile = "black"
-line_length = 80
-include_trailing_comma = true
+[tool.bandit]
+skips = ["B101", "B310"]
 
-[tool.flake8]
-max-line-length = 80
-extend-ignore = ["E203", "W503", "E731", "E501"]
-per-file-ignores = [
-    '__init__.py:F401',
-]
+[tool.ruff]
+line-length = 80
+exclude = ["*_test.py", "setup.py"]
 
-[tool.pylint.'MESSAGES CONTROL']
-max-line-length=80
-disable = [
-    "missing-module-docstring",
-    "missing-function-docstring",
-    "no-name-in-module",
-    "too-many-arguments",
-    "duplicate-code",
-    "invalid-name",
-    "attribute-defined-outside-init",
-    "unsubscriptable-object",
-    "unpacking-non-sequence",
-    "arguments-differ"
+[tool.ruff.lint]
+ignore= ["S101", "ANN1", "ANN2", "ANN0"]
+select = ["ANN", "D", "E", "F"]
+extend-select = [
+  "UP", "D", "I", "PL", "S"
 ]
 
-[tool.bandit]
-skips = ["B101", "B310"]
-
-[tool.pydocstyle]
+[tool.ruff.lint.pydocstyle]
 convention= 'numpy'
-match = '^ramsey/.*/((?!test).)*\.py'
diff --git a/ramsey/__init__.py b/ramsey/__init__.py
index 18df724..7402114 100644
--- a/ramsey/__init__.py
+++ b/ramsey/__init__.py
@@ -1,6 +1,4 @@
-"""
-ramsey: Probabilistic deep learning using JAX
-"""
+"""ramsey: Probabilistic deep learning using JAX."""
 
 from ramsey._src.neural_process.attentive_neural_process import ANP
 from ramsey._src.neural_process.doubly_attentive_neural_process import DANP
diff --git a/ramsey/_src/data/data.py b/ramsey/_src/data/data.py
index c72143f..0150154 100644
--- a/ramsey/_src/data/data.py
+++ b/ramsey/_src/data/data.py
@@ -39,9 +39,7 @@ def m4_data(interval: str = "hourly", drop_na: bool = True):
     train_idxs = jnp.arange(train.shape[1])
     test_idxs = jnp.arange(test.shape[1]) + train.shape[1]
 
-    return namedtuple(
-        "data", ["y", "x", "train_idxs", "test_idxs"]
-    )(  # type: ignore
+    return namedtuple("data", ["y", "x", "train_idxs", "test_idxs"])(  # type: ignore
         y, x, train_idxs, test_idxs
     )
 
diff --git a/ramsey/_src/data/dataset_m4.py b/ramsey/_src/data/dataset_m4.py
index 3eed629..a507d4d 100644
--- a/ramsey/_src/data/dataset_m4.py
+++ b/ramsey/_src/data/dataset_m4.py
@@ -143,6 +143,7 @@ def load(self, interval: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
         train, test = self._load(dataset, train_csv_path, test_csv_path)
         return train, test
 
+    # ruff: noqa: S310
     def _download(self, dataset):
         for url in dataset.urls:
             file = os.path.basename(urlparse(url).path)
diff --git a/ramsey/_src/experimental/bayesian_neural_network/bayesian_linear.py b/ramsey/_src/experimental/bayesian_neural_network/bayesian_linear.py
index 77c8b63..153603c 100644
--- a/ramsey/_src/experimental/bayesian_neural_network/bayesian_linear.py
+++ b/ramsey/_src/experimental/bayesian_neural_network/bayesian_linear.py
@@ -115,6 +115,7 @@ def _get_bias(self, layer_dim, dtype):
         )
         return samples, params
 
+    # ruff: noqa: PLR0913
     def _init_param(self, weight_name, param_name, constraint, shape, dtype):
         init = initializers.xavier_normal()
 
diff --git a/ramsey/_src/experimental/bayesian_neural_network/bayesian_neural_network.py b/ramsey/_src/experimental/bayesian_neural_network/bayesian_neural_network.py
index 22e9880..df4d7a1 100644
--- a/ramsey/_src/experimental/bayesian_neural_network/bayesian_neural_network.py
+++ b/ramsey/_src/experimental/bayesian_neural_network/bayesian_neural_network.py
@@ -11,8 +11,7 @@
 
 
 class BNN(nn.Module):
-    """
-    A Bayesian neural network.
+    """A Bayesian neural network.
 
     The BNN layers can a mix of Bayesian layers and conventional layers.
     The training objective is the ELBO and is calculated according to [1].
diff --git a/ramsey/_src/experimental/bayesian_neural_network/train_bnn.py b/ramsey/_src/experimental/bayesian_neural_network/train_bnn.py
index 3b17ea3..c166d37 100644
--- a/ramsey/_src/experimental/bayesian_neural_network/train_bnn.py
+++ b/ramsey/_src/experimental/bayesian_neural_network/train_bnn.py
@@ -9,7 +9,7 @@
 from rmsyutls import as_batch_iterator
 from tqdm import tqdm
 
-# pylint: disable=line-too-long
+# ruff: noqa: E501
 from ramsey._src.experimental.bayesian_neural_network.bayesian_neural_network import (
     BNN,
 )
@@ -24,7 +24,7 @@ def _create_train_state(rng, model, optimizer, **init_data):
     return state
 
 
-# pylint: disable=too-many-locals
+# ruff: noqa: PLR0913
 def train_bnn(
     rng_key,
     bnn: BNN,
diff --git a/ramsey/_src/experimental/gaussian_process/gaussian_process.py b/ramsey/_src/experimental/gaussian_process/gaussian_process.py
index 2639520..b89baca 100644
--- a/ramsey/_src/experimental/gaussian_process/gaussian_process.py
+++ b/ramsey/_src/experimental/gaussian_process/gaussian_process.py
@@ -14,8 +14,7 @@
 
 # pylint: disable=too-many-instance-attributes,duplicate-code
 class GP(nn.Module):
-    """
-    A Gaussian process.
+    """A Gaussian process.
 
     Attributes
     ----------
@@ -30,8 +29,7 @@ class GP(nn.Module):
 
     @nn.compact
     def __call__(self, x: Array, **kwargs):
-        """
-        Evaluate the Gaussian process.
+        """Evaluate the Gaussian process.
 
         Parameters
         ----------
diff --git a/ramsey/_src/experimental/gaussian_process/kernel/stationary.py b/ramsey/_src/experimental/gaussian_process/kernel/stationary.py
index 80bddf4..cbfa967 100644
--- a/ramsey/_src/experimental/gaussian_process/kernel/stationary.py
+++ b/ramsey/_src/experimental/gaussian_process/kernel/stationary.py
@@ -10,8 +10,7 @@
 
 # pylint: disable=invalid-name
 class Periodic(Kernel, nn.Module):
-    """
-    Periodic covariance function.
+    """Periodic covariance function.
 
     Attributes
     ----------
@@ -60,8 +59,7 @@ def __call__(self, x1: Array, x2: Array = None):
 
 
 class ExponentiatedQuadratic(Kernel, nn.Module):
-    """
-    Exponentiated quadratic covariance function.
+    """Exponentiated quadratic covariance function.
 
     Attributes
     ----------
diff --git a/ramsey/_src/experimental/gaussian_process/train_gaussian_process.py b/ramsey/_src/experimental/gaussian_process/train_gaussian_process.py
index a8432d9..303ccfd 100644
--- a/ramsey/_src/experimental/gaussian_process/train_gaussian_process.py
+++ b/ramsey/_src/experimental/gaussian_process/train_gaussian_process.py
@@ -13,7 +13,7 @@
 )
 
 
-# pylint: disable=too-many-locals,invalid-name
+# ruff: noqa: PLR0913
 def train_gaussian_process(
     rng_key: jr.PRNGKey,
     gaussian_process: GP,
@@ -79,7 +79,7 @@ def obj_fn(params):
     return state.params, objectives
 
 
-# pylint: disable=too-many-locals,invalid-name
+# ruff: noqa: D406
 def train_sparse_gaussian_process(
     rng_key: jr.PRNGKey,
     gaussian_process: SparseGP,
diff --git a/ramsey/_src/neural_process/attentive_neural_process.py b/ramsey/_src/neural_process/attentive_neural_process.py
index 5de9ef7..5718d5e 100644
--- a/ramsey/_src/neural_process/attentive_neural_process.py
+++ b/ramsey/_src/neural_process/attentive_neural_process.py
@@ -10,8 +10,7 @@
 __all__ = ["ANP"]
 
 
-# pylint: disable=too-many-instance-attributes,duplicate-code
-# pylint: disable=unpacking-non-sequence,
+# ruff: noqa: PLR0913
 class ANP(NP):
     """An attentive neural process.
 
diff --git a/ramsey/_src/neural_process/doubly_attentive_neural_process.py b/ramsey/_src/neural_process/doubly_attentive_neural_process.py
index 6076051..8e0092f 100644
--- a/ramsey/_src/neural_process/doubly_attentive_neural_process.py
+++ b/ramsey/_src/neural_process/doubly_attentive_neural_process.py
@@ -12,8 +12,7 @@
 
 # pylint: disable=too-many-instance-attributes
 class DANP(ANP):
-    """
-    A doubly-attentive neural process.
+    """A doubly-attentive neural process.
 
     Implements the core structure of a 'doubly-attentive' neural process [1],
     i.e., a deterministic encoder, a latent encoder with self-attention module,
diff --git a/ramsey/_src/neural_process/neural_process_test.py b/ramsey/_src/neural_process/neural_process_test.py
index 0205c8c..fc2a128 100644
--- a/ramsey/_src/neural_process/neural_process_test.py
+++ b/ramsey/_src/neural_process/neural_process_test.py
@@ -38,15 +38,9 @@ def module():
     chex.assert_shape(params["latent_encoder_0"]["linear_0"]["kernel"], (2, 3))
     chex.assert_shape(params["latent_encoder_0"]["linear_1"]["kernel"], (3, 3))
     chex.assert_shape(params["latent_encoder_1"]["linear_0"]["kernel"], (3, 3))
-    chex.assert_shape(
-        params["latent_encoder_1"]["linear_1"]["kernel"], (3, 2 * 3)
-    )
-    chex.assert_shape(
-        params["deterministic_encoder"]["linear_0"]["kernel"], (2, 4)
-    )
-    chex.assert_shape(
-        params["deterministic_encoder"]["linear_1"]["kernel"], (4, 4)
-    )
+    chex.assert_shape(params["latent_encoder_1"]["linear_1"]["kernel"], (3, 2 * 3))
+    chex.assert_shape(params["deterministic_encoder"]["linear_0"]["kernel"], (2, 4))
+    chex.assert_shape(params["deterministic_encoder"]["linear_1"]["kernel"], (4, 4))
     chex.assert_shape(params["decoder"]["linear_0"]["kernel"], (3 + 4 + 1, 3))
     chex.assert_shape(params["decoder"]["linear_1"]["kernel"], (3, 2))
 
diff --git a/ramsey/_src/neural_process/train_neural_process.py b/ramsey/_src/neural_process/train_neural_process.py
index a8cd033..fad4b1d 100644
--- a/ramsey/_src/neural_process/train_neural_process.py
+++ b/ramsey/_src/neural_process/train_neural_process.py
@@ -138,6 +138,7 @@ def _split_data(
     }
 
 
+# ruff: noqa: ANN001,ANN003,PLR0913
 def _create_train_state(rng, model, optimizer, **init_data):
     init_key, sample_key = jr.split(rng)
     params = model.init({"sample": sample_key, "params": init_key}, **init_data)
diff --git a/ramsey/_src/nn/MLP.py b/ramsey/_src/nn/MLP.py
index 02984fa..9e8ce31 100644
--- a/ramsey/_src/nn/MLP.py
+++ b/ramsey/_src/nn/MLP.py
@@ -8,8 +8,7 @@
 
 
 class MLP(nn.Module):
-    """
-    A multi-layer perceptron.
+    """A multi-layer perceptron.
 
     Attributes
     ----------
@@ -56,19 +55,19 @@ def setup(self):
             self.dropout_layer = nn.Dropout(self.dropout)
 
     # pylint: disable=too-many-function-args
-    def __call__(self, inputs: Array, is_training=False):
+    def __call__(self, inputs: Array, is_training: bool = False):
         """Transform the inputs through the MLP.
 
         Parameters
         ----------
-        inputs: jax.Array
+        inputs: Array
             input data of dimension (*batch_dims, spatial_dims..., feature_dims)
         is_training: boolean
             if true, uses training mode (i.e., dropout)
 
         Returns
         -------
-        jax.Array
+        Array
             returns the transformed inputs
         """
         num_layers = len(self.layers)
diff --git a/ramsey/_src/nn/attention/dotproduct_attention.py b/ramsey/_src/nn/attention/dotproduct_attention.py
index f4eb624..320a051 100644
--- a/ramsey/_src/nn/attention/dotproduct_attention.py
+++ b/ramsey/_src/nn/attention/dotproduct_attention.py
@@ -5,10 +5,11 @@
 from ramsey._src.nn.attention.attention import Attention
 
 
+# ruff: noqa: PLR0913
 class DotProductAttention(Attention):
     """Dot-product attention."""
 
-    def __call__(self, key: Array, value: Array, query: Array):
+    def __call__(self, key: Array, value: Array, query: Array) -> Array:
         """Apply attention to the query.
 
         Arguments
diff --git a/ramsey/_src/nn/attention/multihead_attention.py b/ramsey/_src/nn/attention/multihead_attention.py
index 2ddeeb2..8a57f9e 100644
--- a/ramsey/_src/nn/attention/multihead_attention.py
+++ b/ramsey/_src/nn/attention/multihead_attention.py
@@ -16,9 +16,9 @@
 from ramsey._src.nn.attention.attention import Attention
 
 
+# ruff: noqa: PLR0913
 class MultiHeadAttention(Attention):
-    """
-    Multi-head attention.
+    """Multi-head attention.
 
     As described in [1].
 
@@ -41,7 +41,7 @@ class MultiHeadAttention(Attention):
     head_size: int
     embedding: Optional[nn.Module]
 
-    def setup(self):
+    def setup(self) -> None:
         """Construct the networks."""
         self._attention = _MultiHeadAttention(
             num_heads=self.num_heads,
@@ -50,7 +50,7 @@ def setup(self):
         )
 
     @nn.compact
-    def __call__(self, key: Array, value: Array, query: Array):
+    def __call__(self, key: Array, value: Array, query: Array) -> Array:
         """Apply attention to the query.
 
         Arguments
@@ -73,6 +73,7 @@ def __call__(self, key: Array, value: Array, query: Array):
         return rep
 
 
+# ruff: noqa: E501
 class _MultiHeadAttention(nn.Module):
     num_heads: int
     dtype = None
@@ -99,7 +100,7 @@ def __call__(
         value: Array,
         mask: Optional[Array] = None,
         deterministic: Optional[bool] = None,
-    ):
+    ) -> Array:
         features = self.out_features or query.shape[-1]
         qkv_features = self.qkv_features or query.shape[-1]
         assert (
diff --git a/ramsey/data.py b/ramsey/data.py
index 0fea700..f60215f 100644
--- a/ramsey/data.py
+++ b/ramsey/data.py
@@ -1,3 +1,5 @@
+"""Methods for downloading data sets."""
+
 from ramsey._src.data.data import (
     m4_data,
     sample_from_gaussian_process,
diff --git a/ramsey/experimental.py b/ramsey/experimental.py
index 92c475e..2d72ea0 100644
--- a/ramsey/experimental.py
+++ b/ramsey/experimental.py
@@ -1,8 +1,10 @@
+"""Experimental and recently contributed methods."""
+
 from ramsey._src.experimental.bayesian_neural_network.bayesian_linear import (
     BayesianLinear,
 )
 
-# pylint: disable=line-too-long
+# ruff: noqa: E501
 from ramsey._src.experimental.bayesian_neural_network.bayesian_neural_network import (
     BNN,
 )
@@ -27,7 +29,7 @@
     train_sparse_gaussian_process,
 )
 
-# pylint: disable=line-too-long
+# ruff: noqa: E501
 from ramsey._src.experimental.timeseries.recurrent_attentive_neural_process import (
     RANP,
 )
diff --git a/ramsey/family.py b/ramsey/family.py
index 03e9170..63b4ab7 100644
--- a/ramsey/family.py
+++ b/ramsey/family.py
@@ -1,3 +1,5 @@
+"""Distributional families."""
+
 from ramsey._src.family import Family, Gaussian, NegativeBinomial
 
 __all__ = ["Family", "Gaussian", "NegativeBinomial"]
diff --git a/ramsey/nn.py b/ramsey/nn.py
index 365f699..2f977c7 100644
--- a/ramsey/nn.py
+++ b/ramsey/nn.py
@@ -1,6 +1,4 @@
-"""
-ramsey: Probabilistic deep learning using JAX
-"""
+"""ramsey: Probabilistic deep learning using JAX."""
 
 from ramsey._src.nn.attention.attention import Attention
 from ramsey._src.nn.attention.multihead_attention import MultiHeadAttention
diff --git a/setup.py b/setup.py
index 030eb7b..2a9fdf3 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
 import re
 from os.path import abspath, dirname, join
+
 from setuptools import find_packages, setup
 
 PROJECT_PATH = dirname(abspath(__file__))
@@ -29,8 +30,13 @@ def _version():
         url="https://github.com/ramsey-devs/ramsey",
         author="The Ramsey developers",
         license="Apache 2.0",
-        keywords=["bayes", "jax", "probabilistic deep learning",
-                  "probabilistic models", "neural process"],
+        keywords=[
+            "Bayes",
+            "jax",
+            "probabilistic deep learning",
+            "probabilistic models",
+            "neural process",
+        ],
         packages=find_packages(),
         include_package_data=True,
         python_requires=">=3.9",
@@ -50,7 +56,7 @@ def _version():
             "examples": ["matplotlib"],
         },
         classifiers=[
-            "Development Status :: 1 - Planning",
+            "Development Status :: 3 - Alpha",
             "Intended Audience :: Science/Research",
             "License :: OSI Approved :: Apache Software License",
             "Programming Language :: Python :: 3",
diff --git a/tox.ini b/tox.ini
index b6df529..f1140bf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,30 +1,22 @@
 [tox]
-envlist = format, sort, lints, types, tests
+envlist = format, lints, types, tests
 isolated_build = True
 
 [testenv:format]
 skip_install = true
 commands_pre =
-    pip install black
+    pip install ruff
 commands =
-    black --check ramsey
-
-[testenv:sort]
-skip_install = true
-commands_pre =
-    pip install isort
-commands =
-    isort --check --settings-path=pyproject.toml ramsey
+    ruff format ramsey examples
 
 [testenv:lints]
 skip_install = true
 commands_pre =
-    pip install pylint bandit flake8 ruff
+    pip install ruff
     pip install -e .
 commands =
     bandit ramsey
-    pylint ramsey
-    ruff ramsey
+    ruff check ramsey
 
 [testenv:types]