From 915f2a2d69dbec9e898f75c6f904c1be97f8fa07 Mon Sep 17 00:00:00 2001 From: Krystyna Milian Date: Tue, 29 Aug 2023 09:59:31 +0200 Subject: [PATCH] initial commit with the pipeline for training and evaluating seisbench models --- .gitignore | 9 +- README.md | 57 +- config.json | 15 + experiments/config.json | 11 - experiments/sweep_gpd.yaml | 26 + experiments/sweep_gpd_highpass.yaml | 27 + experiments/sweep_phasenet.yaml | 20 + poetry.lock | 1605 ++++++++++++++++++++++++++++++++++- pyproject.toml | 2 + scripts/augmentations.py | 133 +++ scripts/collect_results.py | 335 ++++++++ scripts/config_loader.py | 26 + scripts/data.py | 32 + scripts/eval.py | 248 ++++++ scripts/generate_eval_targets.py | 321 +++++++ scripts/hyperparameter_sweep.py | 178 ++++ scripts/models.py | 1138 +++++++++++++++++++++++++ scripts/pipeline.py | 92 ++ scripts/train.py | 532 +++++------- scripts/training_wandb_sweep.py | 62 -- scripts/util.py | 119 +++ 21 files changed, 4589 insertions(+), 399 deletions(-) create mode 100644 config.json delete mode 100644 experiments/config.json create mode 100644 experiments/sweep_gpd.yaml create mode 100644 experiments/sweep_gpd_highpass.yaml create mode 100644 experiments/sweep_phasenet.yaml create mode 100644 scripts/augmentations.py create mode 100644 scripts/collect_results.py create mode 100644 scripts/config_loader.py create mode 100644 scripts/data.py create mode 100644 scripts/eval.py create mode 100644 scripts/generate_eval_targets.py create mode 100644 scripts/hyperparameter_sweep.py create mode 100644 scripts/models.py create mode 100644 scripts/pipeline.py delete mode 100644 scripts/training_wandb_sweep.py create mode 100644 scripts/util.py diff --git a/.gitignore b/.gitignore index 8db7252..0b16eca 100644 --- a/.gitignore +++ b/.gitignore @@ -3,8 +3,11 @@ __pycache__/ */.ipynb_checkpoints/ .ipynb_checkpoints/ .env -models/ -data/ +weights/ +datasets/ wip artifacts/ -wandb/ \ No newline at end of file +wandb/ +scripts/pred/ +scripts/pred_resampled/ +scripts/lightning_logs/ \ No newline at end of file diff --git a/README.md b/README.md index 16fef77..938b096 100644 --- a/README.md +++ b/README.md @@ -2,20 +2,59 @@ This repo contains notebooks and scripts demonstrating how to: -- Prepare IGF data for training model detecting P phase (i.e. transform mseeds into [SeisBench data format](https://seisbench.readthedocs.io/en/stable/pages/data_format.html)), check the [notebook](utils/Transforming%20mseeds%20to%20SeisBench%20dataset.ipynb). -The original data can be downloaded from the [drive](https://drive.google.com/drive/folders/1InVI9DLaD7gdzraM2jMzeIrtiBSu-UIK?usp=drive_link) +- Prepare IGF data for training a seisbench model detecting P phase (i.e. transform mseeds into [SeisBench data format](https://seisbench.readthedocs.io/en/stable/pages/data_format.html)), check the [notebook](utils/Transforming%20mseeds%20to%20SeisBench%20dataset.ipynb). 
- Explore available data, check the [notebook](notebooks/Explore%20igf%20data.ipynb)
-- Train cnn model (Seisbench PhaseNet) to detect P phase, check the [script](scripts/train.py)
-- Use Weights & Biases to search for the best training hyperparams, check the [script](scripts/training_wandb_sweep.py)
-- Validate model performance, check the [notebook](notebooks/Check%20model%20performance%20depending%20on%20station-random%20window.ipynb)
-- Use model for detecting P phase, check the [notebook](notebooks/Present%20model%20predictions.ipynb)
+- Train various CNN models available in the SeisBench library and compare their performance at detecting the P phase, check the [script](scripts/pipeline.py)
+
+- [to update] Validate model performance, check the [notebook](notebooks/Check%20model%20performance%20depending%20on%20station-random%20window.ipynb)
+- [to update] Use the model for detecting the P phase, check the [notebook](notebooks/Present%20model%20predictions.ipynb)
+
+### Acknowledgments
+This code is based on the [pick-benchmark](https://github.com/seisbench/pick-benchmark), the repository accompanying the paper:
+[Which picker fits my data? A quantitative evaluation of deep learning based seismic pickers](https://github.com/seisbench/pick-benchmark#:~:text=Which%20picker%20fits%20my%20data%3F%20A%20quantitative%20evaluation%20of%20deep%20learning%20based%20seismic%20pickers)
 ### Usage
 1. Install all dependencies with poetry, run:
-`poetry install`
-2. Prepare .env file with content:
- `WANDB_API_KEY="your key"` \ No newline at end of file
+ `poetry install`
+2. Prepare a .env file with the following content:
+ ```
+ WANDB_HOST="https://epos-ai.grid.cyfronet.pl/"
+ WANDB_API_KEY="your key"
+ WANDB_USER="your user"
+ WANDB_PROJECT="training_seisbench_models_on_igf_data"
+ BENCHMARK_DEFAULT_WORKER=2
+ ```
+
+3. Transform the data into the SeisBench format:
+ * Download the original data from the [drive](https://drive.google.com/drive/folders/1InVI9DLaD7gdzraM2jMzeIrtiBSu-UIK?usp=drive_link)
+ * Run the notebook: `utils/Transforming mseeds to SeisBench dataset.ipynb`
+
+4. Initialize the poetry environment:
+
+ `poetry shell`
+
+5. Run the pipeline script:
+
+ `python pipeline.py`
+
+ The script performs the following steps:
+ * Generates evaluation targets
+ * Trains multiple versions of the GPD, PhaseNet and ... models to find the hyperparameters that produce the lowest validation loss.
+ This step uses the Weights & Biases platform to perform the hyperparameter search (called sweeping), track the training process, and store the results.
+ The results are available at
+ `https://epos-ai.grid.cyfronet.pl/<WANDB_USER>/<WANDB_PROJECT>`
+ * Uses the best-performing model of each type to generate predictions
+ * Evaluates the performance of each model by comparing the predictions with the evaluation targets
+ * Saves the results in the `scripts/pred` directory
+
+ The default settings are stored in the config.json file. To change the settings, edit config.json or pass the new settings as arguments to the script.
+ For example, to change the sweep configuration file for the GPD model, run:
+ `python pipeline.py --gpd_config <sweep config file name>`
+ The new config file should be placed in the `experiments` directory, or in the location given by the `configs_path` parameter in the config.json file.
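+
+ For orientation, below is a minimal sketch (not the repository's exact code) of how the pipeline plausibly wires config.json and a sweep YAML into a Weights & Biases sweep. The `run_training` stub, the hard-coded `"GPD"` key, and the project name are illustrative assumptions; the actual search is driven by `scripts/hyperparameter_sweep.py`.
+
+ ```python
+ import json
+
+ import wandb
+ import yaml
+
+ # Global defaults: data paths, seed, sweep-file mapping, experiment count.
+ with open("config.json") as f:
+     config = json.load(f)
+
+ # Each model type maps to a sweep definition, e.g. experiments/sweep_gpd.yaml.
+ sweep_file = config["sweep_files"]["GPD"]
+ with open(f"{config['configs_path']}/{sweep_file}") as f:
+     sweep_config = yaml.safe_load(f)
+
+ def run_training():
+     # Hypothetical training entry point; wandb.config carries the
+     # hyperparameters drawn by the sweep for this trial.
+     with wandb.init() as run:
+         print(dict(run.config))
+
+ # Register the sweep (Bayesian search minimizing val_loss, per the YAML)
+ # and let an agent run `experiment_count` trials.
+ sweep_id = wandb.sweep(sweep_config, project="training_seisbench_models_on_igf_data")
+ wandb.agent(sweep_id, function=run_training, count=config["experiment_count"])
+ ```
+
+### Troubleshooting
+
+* `wandb: ERROR Run .. 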
errored: OSError(24, 'Too many open files')` +-> https://github.com/wandb/wandb/issues/2825 diff --git a/config.json b/config.json new file mode 100644 index 0000000..a323aff --- /dev/null +++ b/config.json @@ -0,0 +1,15 @@ +{ + "dataset_name": "igf", + "data_path": "datasets/igf/seisbench_format/", + "targets_path": "datasets/targets/igf", + "models_path": "weights", + "configs_path": "experiments", + "sampling_rate": 100, + "num_workers": 1, + "seed": 10, + "sweep_files": { + "GPD": "sweep_gpd.yaml", + "PhaseNet": "sweep_phasenet.yaml" + }, + "experiment_count": 20 +} \ No newline at end of file diff --git a/experiments/config.json b/experiments/config.json deleted file mode 100644 index abf900f..0000000 --- a/experiments/config.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "epochs": 10, - "batch_size": 256, - "dataset": "igf_1", - "sampling_rate": 100, - "model_names": "EQTransformer,BasicPhaseAE,GPD", - "model_name": "PhaseNet", - "learning_rate": 0.01, - "pretrained": null, - "sampling_rate": 100 -} \ No newline at end of file diff --git a/experiments/sweep_gpd.yaml b/experiments/sweep_gpd.yaml new file mode 100644 index 0000000..db0f435 --- /dev/null +++ b/experiments/sweep_gpd.yaml @@ -0,0 +1,26 @@ +name: GPD_fixed_highpass:2-10 +method: bayes +metric: + goal: minimize + name: val_loss +parameters: + model_name: + value: + - GPD + batch_size: + distribution: int_uniform + max: 1024 + min: 256 + max_epochs: + value: + - 3 + learning_rate: + distribution: uniform + max: 0.02 + min: 0.005 + highpass: + value: + - 2 + lowpass: + value: + - 10 \ No newline at end of file diff --git a/experiments/sweep_gpd_highpass.yaml b/experiments/sweep_gpd_highpass.yaml new file mode 100644 index 0000000..3ef6edb --- /dev/null +++ b/experiments/sweep_gpd_highpass.yaml @@ -0,0 +1,27 @@ +name: GPD_fixed_highpass:2-10 +method: bayes +metric: + goal: minimize + name: val_loss +parameters: + model_name: + value: + - GPD + batch_size: + distribution: int_uniform + max: 1024 + min: 256 + max_epochs: + value: + - 15 + learning_rate: + distribution: uniform + max: 0.02 + min: 0.005 + highpass: + distribution: uniform + min: 0.5 + max: 2.0 + lowpass: + value: + - 10 \ No newline at end of file diff --git a/experiments/sweep_phasenet.yaml b/experiments/sweep_phasenet.yaml new file mode 100644 index 0000000..702b69b --- /dev/null +++ b/experiments/sweep_phasenet.yaml @@ -0,0 +1,20 @@ +name: PhaseNet-lr0.005-0.02-bs256-1024 +method: bayes +metric: + goal: minimize + name: val_loss +parameters: + model_name: + value: + - PhaseNet + batch_size: + distribution: int_uniform + max: 1024 + min: 256 + max_epochs: + value: + - 15 + learning_rate: + distribution: uniform + max: 0.02 + min: 0.005 diff --git a/poetry.lock b/poetry.lock index b8d0e96..3319712 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,3 +1,21 @@ +[[package]] +name = "anyio" +version = "3.7.1" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["packaging", "sphinx", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery", "sphinx-autodoc-typehints (>=1.2.0)"] +test = ["anyio", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)", "mock (>=4)"] +trio = ["trio (<0.22)"] + [[package]] name = "appdirs" version = 
"1.4.4" @@ -6,6 +24,142 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "appnope" +version = "0.1.3" +description = "Disable App Nap on macOS >= 10.9" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "argon2-cffi" +version = "21.3.0" +description = "The secure Argon2 password hashing algorithm." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +argon2-cffi-bindings = "*" + +[package.extras] +dev = ["pre-commit", "cogapp", "tomli", "coverage[toml] (>=5.0.2)", "hypothesis", "pytest", "sphinx", "sphinx-notfound-page", "furo"] +docs = ["sphinx", "sphinx-notfound-page", "furo"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pytest"] + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +description = "Low-level CFFI bindings for Argon2" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +cffi = ">=1.0.1" + +[package.extras] +dev = ["pytest", "cogapp", "pre-commit", "wheel"] +tests = ["pytest"] + +[[package]] +name = "arrow" +version = "1.2.3" +description = "Better dates & times for Python" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +python-dateutil = ">=2.7.0" + +[[package]] +name = "asttokens" +version = "2.2.1" +description = "Annotate AST trees with source code positions" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = "*" + +[package.extras] +test = ["astroid", "pytest"] + +[[package]] +name = "async-lru" +version = "2.0.3" +description = "Simple LRU cache for asyncio" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +cov = ["attrs", "coverage[toml] (>=5.3)"] +dev = ["attrs", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest-mypy-plugins", "pytest-xdist", "pytest (>=4.3.0)"] + +[[package]] +name = "babel" +version = "2.12.1" +description = "Internationalization utilities" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "backcall" +version = "0.2.0" +description = "Specifications for callback functions passed in to an API" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "beautifulsoup4" +version = "4.12.2" +description = "Screen-scraping library" +category = "main" +optional = false +python-versions = ">=3.6.0" + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "bleach" +version = "6.0.0" +description = "An easy safelist-based HTML-sanitizing tool." +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +six = ">=1.9.0" +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.2)"] + [[package]] name = "certifi" version = "2023.5.7" @@ -14,6 +168,17 @@ category = "main" optional = false python-versions = ">=3.6" +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." 
+category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +pycparser = "*" + [[package]] name = "charset-normalizer" version = "3.1.0" @@ -41,6 +206,22 @@ category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +[[package]] +name = "comm" +version = "0.1.3" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +traitlets = ">=5.3" + +[package.extras] +lint = ["black (>=22.6.0)", "mdformat-gfm (>=0.3.5)", "mdformat (>0.7)", "ruff (>=0.0.156)"] +test = ["pytest"] +typing = ["mypy (>=0.990)"] + [[package]] name = "contourpy" version = "1.1.0" @@ -67,6 +248,14 @@ category = "main" optional = false python-versions = ">=3.6" +[[package]] +name = "debugpy" +version = "1.6.7" +description = "An implementation of the Debug Adapter Protocol for Python" +category = "main" +optional = false +python-versions = ">=3.7" + [[package]] name = "decorator" version = "5.1.1" @@ -75,6 +264,14 @@ category = "main" optional = false python-versions = ">=3.5" +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + [[package]] name = "docker-pycreds" version = "0.4.0" @@ -86,6 +283,39 @@ python-versions = "*" [package.dependencies] six = ">=1.4.0" +[[package]] +name = "exceptiongroup" +version = "1.1.2" +description = "Backport of PEP 654 (exception groups)" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "executing" +version = "1.2.0" +description = "Get the currently executing AST node of a frame, and other information" +category = "main" +optional = false +python-versions = "*" + +[package.extras] +tests = ["asttokens", "pytest", "littleutils", "rich"] + +[[package]] +name = "fastjsonschema" +version = "2.17.1" +description = "Fastest Python implementation of JSON schema" +category = "main" +optional = false +python-versions = "*" + +[package.extras] +devel = ["colorama", "jsonschema", "json-spec", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + [[package]] name = "filelock" version = "3.12.2" @@ -120,6 +350,14 @@ ufo = ["fs (>=2.2.0,<3)"] unicode = ["unicodedata2 (>=15.0.0)"] woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"] +[[package]] +name = "fqdn" +version = "1.5.1" +description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" + [[package]] name = "gitdb" version = "4.0.10" @@ -173,6 +411,98 @@ category = "main" optional = false python-versions = ">=3.5" +[[package]] +name = "ipykernel" +version = "6.24.0" +description = "IPython Kernel for Jupyter" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=20" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage", "curio", "matplotlib", 
"pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest-asyncio", "pytest-cov", "pytest-timeout", "pytest (>=7.0)"] + +[[package]] +name = "ipython" +version = "8.14.0" +description = "IPython: Productive Interactive Computing" +category = "main" +optional = false +python-versions = ">=3.9" + +[package.dependencies] +appnope = {version = "*", markers = "sys_platform == \"darwin\""} +backcall = "*" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} +pickleshare = "*" +prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5" + +[package.extras] +all = ["black", "ipykernel", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "docrepr", "matplotlib", "stack-data", "pytest (<7)", "typing-extensions", "pytest (<7.1)", "pytest-asyncio", "testpath", "nbconvert", "nbformat", "ipywidgets", "notebook", "ipyparallel", "qtconsole", "curio", "matplotlib (!=3.2.0)", "numpy (>=1.21)", "pandas", "trio"] +black = ["black"] +doc = ["ipykernel", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "docrepr", "matplotlib", "stack-data", "pytest (<7)", "typing-extensions", "pytest (<7.1)", "pytest-asyncio", "testpath"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] +test_extra = ["pytest (<7.1)", "pytest-asyncio", "testpath", "curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "trio"] + +[[package]] +name = "isoduration" +version = "20.11.0" +description = "Operations with ISO 8601 durations" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +arrow = ">=0.15.0" + +[[package]] +name = "jedi" +version = "0.18.2" +description = "An autocompletion tool for Python that can be used for text editors." +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +parso = ">=0.8.0,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx-rtd-theme (==0.4.3)", "sphinx (==1.8.5)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + [[package]] name = "jinja2" version = "3.1.2" @@ -187,6 +517,237 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "json5" +version = "0.9.14" +description = "A Python implementation of the JSON5 data format." 
+category = "main" +optional = false +python-versions = "*" + +[package.extras] +dev = ["hypothesis"] + +[[package]] +name = "jsonpointer" +version = "2.4" +description = "Identify specific nodes in a JSON document (RFC 6901)" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" + +[[package]] +name = "jsonschema" +version = "4.18.0" +description = "An implementation of JSON Schema validation for Python" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +attrs = ">=22.2.0" +fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""} +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} +rpds-py = ">=0.7.1" +uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +webcolors = {version = ">=1.11", optional = true, markers = "extra == \"format-nongpl\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.6.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +referencing = ">=0.28.0" + +[[package]] +name = "jupyter-client" +version = "8.3.0" +description = "Jupyter protocol implementation and client libraries" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinx (>=4)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.3.1" +description = "Jupyter core package. A base package on which Jupyter projects rely." 
+category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyter-events" +version = "0.6.3" +description = "Jupyter Event System library" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +jsonschema = {version = ">=3.2.0", extras = ["format-nongpl"]} +python-json-logger = ">=2.0.4" +pyyaml = ">=5.3" +rfc3339-validator = "*" +rfc3986-validator = ">=0.1.1" +traitlets = ">=5.3" + +[package.extras] +cli = ["click", "rich"] +docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"] +test = ["click", "coverage", "pre-commit", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "pytest-cov", "pytest (>=7.0)", "rich"] + +[[package]] +name = "jupyter-lsp" +version = "2.2.0" +description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +jupyter-server = ">=1.1.2" + +[[package]] +name = "jupyter-server" +version = "2.7.0" +description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +anyio = ">=3.1.0" +argon2-cffi = "*" +jinja2 = "*" +jupyter-client = ">=7.4.4" +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-events = ">=0.6.0" +jupyter-server-terminals = "*" +nbconvert = ">=6.4.4" +nbformat = ">=5.3.0" +overrides = "*" +packaging = "*" +prometheus-client = "*" +pywinpty = {version = "*", markers = "os_name == \"nt\""} +pyzmq = ">=24" +send2trash = "*" +terminado = ">=0.8.3" +tornado = ">=6.2.0" +traitlets = ">=5.6.0" +websocket-client = "*" + +[package.extras] +docs = ["ipykernel", "jinja2", "jupyter-client", "jupyter-server", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"] +test = ["flaky", "ipykernel", "pre-commit", "pytest-console-scripts", "pytest-jupyter[server] (>=0.4)", "pytest-timeout", "pytest (>=7.0)", "requests"] + +[[package]] +name = "jupyter-server-terminals" +version = "0.4.4" +description = "A Jupyter Server Extension Providing Terminals." 
+category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} +terminado = ">=0.8.3" + +[package.extras] +docs = ["jinja2", "jupyter-server", "mistune (<3.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] +test = ["coverage", "jupyter-server (>=2.0.0)", "pytest-cov", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout", "pytest (>=7.0)"] + +[[package]] +name = "jupyterlab" +version = "4.0.2" +description = "JupyterLab computational environment" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +async-lru = ">=1.0.0" +ipykernel = "*" +jinja2 = ">=3.0.3" +jupyter-core = "*" +jupyter-lsp = ">=2.0.0" +jupyter-server = ">=2.4.0,<3" +jupyterlab-server = ">=2.19.0,<3" +notebook-shim = ">=0.2" +packaging = "*" +tomli = {version = "*", markers = "python_version < \"3.11\""} +tornado = ">=6.2.0" +traitlets = "*" + +[package.extras] +dev = ["black[jupyter] (==23.3.0)", "build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.0.271)"] +docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-tornasync", "sphinx-copybutton", "sphinx (>=1.8)"] +docs-screenshots = ["altair (==5.0.1)", "ipython (==8.14.0)", "ipywidgets (==8.0.6)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.0.post0)", "matplotlib (==3.7.1)", "nbconvert (>=7.0.0)", "pandas (==2.0.2)", "scipy (==1.10.1)", "vega-datasets (==0.9.0)"] +test = ["coverage", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "pytest (>=7.0)", "requests", "requests-cache", "virtualenv"] + +[[package]] +name = "jupyterlab-pygments" +version = "0.2.2" +description = "Pygments theme using JupyterLab CSS variables" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "jupyterlab-server" +version = "2.23.0" +description = "A set of server components for JupyterLab and JupyterLab like applications." 
+category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +babel = ">=2.10" +jinja2 = ">=3.0.3" +json5 = ">=0.9.0" +jsonschema = ">=4.17.3" +jupyter-server = ">=1.21,<3" +packaging = ">=21.3" +requests = ">=2.28" + +[package.extras] +docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"] +openapi = ["openapi-core (>=0.16.1,<0.17.0)", "ruamel-yaml"] +test = ["hatch", "ipykernel", "jupyterlab-server", "openapi-spec-validator (>=0.5.1,<0.6.0)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "pytest (>=7.0)", "requests-mock", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"] + [[package]] name = "kiwisolver" version = "1.4.4" @@ -237,6 +798,25 @@ pyparsing = ">=2.3.1" python-dateutil = ">=2.7" setuptools_scm = ">=7" +[[package]] +name = "matplotlib-inline" +version = "0.1.6" +description = "Inline Matplotlib backend for Jupyter" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mistune" +version = "3.0.1" +description = "A sane and fast Markdown parser with useful plugins and renderers" +category = "main" +optional = false +python-versions = ">=3.7" + [[package]] name = "mpmath" version = "1.3.0" @@ -251,6 +831,77 @@ docs = ["sphinx"] gmpy = ["gmpy2 (>=2.1.0a4)"] tests = ["pytest (>=4.6)"] +[[package]] +name = "nbclient" +version = "0.8.0" +description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." +category = "main" +optional = false +python-versions = ">=3.8.0" + +[package.dependencies] +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +nbformat = ">=5.1" +traitlets = ">=5.4" + +[package.extras] +dev = ["pre-commit"] +docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient", "sphinx-book-theme", "sphinx (>=1.7)", "sphinxcontrib-spelling"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest-asyncio", "pytest-cov (>=4.0)", "pytest (>=7.0)", "testpath", "xmltodict"] + +[[package]] +name = "nbconvert" +version = "7.6.0" +description = "Converting Jupyter Notebooks" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +beautifulsoup4 = "*" +bleach = "!=5.0.0" +defusedxml = "*" +jinja2 = ">=3.0" +jupyter-core = ">=4.7" +jupyterlab-pygments = "*" +markupsafe = ">=2.0" +mistune = ">=2.0.3,<4" +nbclient = ">=0.5.0" +nbformat = ">=5.7" +packaging = "*" +pandocfilters = ">=1.4.1" +pygments = ">=2.4.1" +tinycss2 = "*" +traitlets = ">=5.1" + +[package.extras] +all = ["nbconvert"] +docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] +qtpdf = ["nbconvert"] +qtpng = ["pyqtwebengine (>=5.15)"] +serve = ["tornado (>=6.1)"] +test = ["ipykernel", "ipywidgets (>=7)", "pre-commit", "pytest", "pytest-dependency"] +webpdf = ["pyppeteer (>=1,<1.1)"] + +[[package]] +name = "nbformat" +version = "5.9.1" +description = "The Jupyter Notebook format" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +fastjsonschema = "*" +jsonschema = ">=2.6" +jupyter-core = "*" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + 
[[package]] name = "nest-asyncio" version = "1.5.6" @@ -274,6 +925,20 @@ doc = ["sphinx (>=6.1)", "pydata-sphinx-theme (>=0.13)", "sphinx-gallery (>=0.12 extra = ["lxml (>=4.6)", "pygraphviz (>=1.10)", "pydot (>=1.4.2)", "sympy (>=1.10)"] test = ["pytest (>=7.2)", "pytest-cov (>=4.0)", "codecov (>=2.1)"] +[[package]] +name = "notebook-shim" +version = "0.2.3" +description = "A shim layer for notebook traits and config" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +jupyter-server = ">=1.8,<3" + +[package.extras] +test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] + [[package]] name = "numpy" version = "1.25.0" @@ -306,6 +971,14 @@ imaging = ["cartopy"] "io.shapefile" = ["pyshp"] tests = ["packaging", "pyproj", "pytest", "pytest-json-report"] +[[package]] +name = "overrides" +version = "7.3.1" +description = "A decorator to automatically detect mismatch when overriding a method." +category = "main" +optional = false +python-versions = ">=3.6" + [[package]] name = "packaging" version = "23.1" @@ -354,6 +1027,26 @@ sql-other = ["SQLAlchemy (>=1.4.16)"] test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "pytest-asyncio (>=0.17.0)"] xml = ["lxml (>=4.6.3)"] +[[package]] +name = "pandocfilters" +version = "1.5.0" +description = "Utilities for writing pandoc filters in python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "parso" +version = "0.8.3" +description = "A Python Parser" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["docopt", "pytest (<6.0.0)"] + [[package]] name = "pathtools" version = "0.1.2" @@ -362,6 +1055,25 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "pexpect" +version = "4.8.0" +description = "Pexpect allows easy control of interactive console applications." +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pickleshare" +version = "0.7.5" +description = "Tiny 'shelve'-like database with concurrency support" +category = "main" +optional = false +python-versions = "*" + [[package]] name = "pillow" version = "10.0.0" @@ -374,6 +1086,40 @@ python-versions = ">=3.8" docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +[[package]] +name = "platformdirs" +version = "3.8.1" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)", "sphinx (>=7.0.1)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest (>=7.3.1)"] + +[[package]] +name = "prometheus-client" +version = "0.17.1" +description = "Python client for the Prometheus monitoring system." 
+category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +twisted = ["twisted"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.39" +description = "Library for building powerful interactive command lines in Python" +category = "main" +optional = false +python-versions = ">=3.7.0" + +[package.dependencies] +wcwidth = "*" + [[package]] name = "protobuf" version = "4.23.3" @@ -393,6 +1139,44 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [package.extras] test = ["ipaddress", "mock", "enum34", "pywin32", "wmi"] +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "pure-eval" +version = "0.2.2" +description = "Safely evaluate AST nodes without side effects" +category = "main" +optional = false +python-versions = "*" + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "pygments" +version = "2.15.1" +description = "Pygments is a syntax highlighting package written in Python." +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +plugins = ["importlib-metadata"] + [[package]] name = "pyparsing" version = "3.1.0" @@ -426,6 +1210,14 @@ python-versions = ">=3.8" [package.extras] cli = ["click (>=5.0)"] +[[package]] +name = "python-json-logger" +version = "2.0.7" +description = "A python library adding a json log formatter" +category = "main" +optional = false +python-versions = ">=3.6" + [[package]] name = "pytz" version = "2023.3" @@ -434,6 +1226,22 @@ category = "main" optional = false python-versions = "*" +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "pywinpty" +version = "2.0.10" +description = "Pseudo terminal support for Windows from Python." 
+category = "main" +optional = false +python-versions = ">=3.7" + [[package]] name = "pyyaml" version = "6.0" @@ -442,6 +1250,29 @@ category = "main" optional = false python-versions = ">=3.6" +[[package]] +name = "pyzmq" +version = "25.1.0" +description = "Python bindings for 0MQ" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "referencing" +version = "0.29.1" +description = "JSON Referencing + Python" +category = "main" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + [[package]] name = "requests" version = "2.31.0" @@ -460,6 +1291,33 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +description = "A pure python RFC3339 validator" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +six = "*" + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +description = "Pure python rfc3986 validator" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "rpds-py" +version = "0.8.10" +description = "Python bindings to Rust's persistent data structures (rpds)" +category = "main" +optional = false +python-versions = ">=3.8" + [[package]] name = "scipy" version = "1.9.3" @@ -498,6 +1356,19 @@ tqdm = ">=4.52" development = ["flake8", "black", "isort", "pre-commit"] tests = ["pytest", "pytest-asyncio"] +[[package]] +name = "send2trash" +version = "1.8.2" +description = "Send file to trash natively under Mac OS X, Windows and Linux" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[package.extras] +nativelib = ["pyobjc-framework-cocoa", "pywin32"] +objc = ["pyobjc-framework-cocoa"] +win32 = ["pywin32"] + [[package]] name = "sentry-sdk" version = "1.26.0" @@ -581,6 +1452,22 @@ category = "main" optional = false python-versions = ">=3.6" +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "soupsieve" +version = "2.4.1" +description = "A modern CSS selector implementation for Beautiful Soup." +category = "main" +optional = false +python-versions = ">=3.7" + [[package]] name = "sqlalchemy" version = "2.0.17" @@ -617,6 +1504,22 @@ postgresql_psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3-binary"] +[[package]] +name = "stack-data" +version = "0.6.2" +description = "Extract data from python stack frames and tracebacks for informative displays" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["pytest", "typeguard", "pygments", "littleutils", "cython"] + [[package]] name = "sympy" version = "1.12" @@ -628,6 +1531,38 @@ python-versions = ">=3.8" [package.dependencies] mpmath = ">=0.19" +[[package]] +name = "terminado" +version = "0.17.1" +description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." 
+category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +ptyprocess = {version = "*", markers = "os_name != \"nt\""} +pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} +tornado = ">=6.1.0" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["pre-commit", "pytest-timeout", "pytest (>=7.0)"] + +[[package]] +name = "tinycss2" +version = "1.2.1" +description = "A tiny CSS parser" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx-rtd-theme"] +test = ["pytest", "isort", "flake8"] + [[package]] name = "tomli" version = "2.0.1" @@ -676,6 +1611,14 @@ multimodal = ["transformers (>=4.10.0)"] test = ["phmdoctest (>=1.1.1)", "bert-score (==0.3.13)", "types-setuptools", "scipy (>1.0.0)", "fast-bss-eval (>=0.1.0)", "mir-eval (>=0.6)", "pytorch-msssim (==0.2.1)", "types-emoji", "types-tabulate", "types-protobuf", "types-requests", "pytest-timeout (<=2.1.0)", "rouge-score (>0.1.0)", "fire (<=0.5.0)", "cloudpickle (>1.3)", "kornia (>=0.6.7)", "scikit-image (>0.17.1)", "huggingface-hub (<0.7)", "types-six", "requests (<=2.28.2)", "pytest-cov (>2.10)", "coverage (>5.2)", "pytest-doctestplus (>=0.9.0)", "dython (<=0.7.3)", "sacrebleu (>=2.0.0)", "pytest (>=6.0.0)", "netcal (>1.0.0)", "psutil (<=5.9.4)", "jiwer (>=2.3.0)", "transformers (>4.4.0)", "scikit-learn (>1.0)", "mypy (==0.982)", "pandas (>1.0.0)", "pypesq (>1.2)", "torch-complex (<=0.4.3)", "pytest-rerunfailures (>=10.0)", "types-pyyaml"] text = ["tqdm (>=4.41.0)", "nltk (>=3.6)", "regex (>=2021.9.24)"] +[[package]] +name = "tornado" +version = "6.3.2" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+category = "main" +optional = false +python-versions = ">= 3.8" + [[package]] name = "tqdm" version = "4.65.0" @@ -693,6 +1636,18 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "traitlets" +version = "5.9.0" +description = "Traitlets Python configuration system" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] + [[package]] name = "typing-extensions" version = "4.7.1" @@ -709,6 +1664,17 @@ category = "main" optional = false python-versions = ">=2" +[[package]] +name = "uri-template" +version = "1.3.0" +description = "RFC 6570 URI Template Processor" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +dev = ["types-pyyaml", "mypy", "flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "pep8-naming"] + [[package]] name = "urllib3" version = "2.0.3" @@ -756,20 +1722,194 @@ media = ["numpy", "moviepy", "pillow", "bokeh", "soundfile", "plotly", "rdkit-py models = ["cloudpickle"] sweeps = ["sweeps (>=0.2.0)"] +[[package]] +name = "wcwidth" +version = "0.2.6" +description = "Measures the displayed width of unicode strings in a terminal" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "webcolors" +version = "1.13" +description = "A library for working with the color formats defined by HTML and CSS." 
+category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +category = "main" +optional = false +python-versions = "*" + +[[package]] +name = "websocket-client" +version = "1.6.1" +description = "WebSocket client for Python with low level API options" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + [metadata] lock-version = "1.1" python-versions = "^3.10" -content-hash = "f10408102d4982572873acaefe1edf075df88d800fd7353fd6066af1c2382160" +content-hash = "2f8790f8c3e1a78ff23f0a0f0e954c97d2b0033fc6a890d4ef1355c6922dcc64" [metadata.files] +anyio = [ + {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, + {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, +] appdirs = [ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, ] +appnope = [ + {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, + {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, +] +argon2-cffi = [ + {file = "argon2-cffi-21.3.0.tar.gz", hash = "sha256:d384164d944190a7dd7ef22c6aa3ff197da12962bd04b17f64d4e93d934dba5b"}, + {file = "argon2_cffi-21.3.0-py3-none-any.whl", hash = "sha256:8c976986f2c5c0e5000919e6de187906cfd81fb1c72bf9d88c01177e77da7f80"}, +] +argon2-cffi-bindings = [ + {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = 
"sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, + {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, +] +arrow = [ + {file = "arrow-1.2.3-py3-none-any.whl", hash = "sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2"}, + {file = "arrow-1.2.3.tar.gz", hash = "sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1"}, +] +asttokens = [ + {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"}, + {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"}, +] +async-lru = [ + {file = "async-lru-2.0.3.tar.gz", hash = "sha256:b714c9d1415fca4e264da72a9e2abc66880ce7430e03a973341f88ea4c0d4869"}, + {file = "async_lru-2.0.3-py3-none-any.whl", hash = "sha256:00c0a8899c20b9c88663a47732689ff98189c9fa08ad9f734d7722f934d250b1"}, +] +attrs = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] +babel = [ + {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"}, + {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"}, +] +backcall = [ + {file = 
"backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, + {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, +] +beautifulsoup4 = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] +bleach = [ + {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, + {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, +] certifi = [ {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, ] +cffi = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = 
"sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = 
"cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = 
"sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] charset-normalizer = [ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, @@ -855,6 +1995,10 @@ colorama = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +comm = [ + {file = "comm-0.1.3-py3-none-any.whl", hash = "sha256:16613c6211e20223f215fc6d3b266a247b6e2641bf4e0a3ad34cb1aff2aa3f37"}, + {file = "comm-0.1.3.tar.gz", hash = "sha256:a61efa9daffcfbe66fd643ba966f846a624e4e6d6767eda9cf6e993aadaab93e"}, +] contourpy = [ {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"}, {file = "contourpy-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3"}, @@ -900,14 +2044,50 @@ cycler = [ {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, ] +debugpy = [ + {file = "debugpy-1.6.7-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b3e7ac809b991006ad7f857f016fa92014445085711ef111fdc3f74f66144096"}, + {file = "debugpy-1.6.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3876611d114a18aafef6383695dfc3f1217c98a9168c1aaf1a02b01ec7d8d1e"}, + {file = "debugpy-1.6.7-cp310-cp310-win32.whl", hash = "sha256:33edb4afa85c098c24cc361d72ba7c21bb92f501104514d4ffec1fb36e09c01a"}, + {file = "debugpy-1.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:ed6d5413474e209ba50b1a75b2d9eecf64d41e6e4501977991cdc755dc83ab0f"}, + {file = "debugpy-1.6.7-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:38ed626353e7c63f4b11efad659be04c23de2b0d15efff77b60e4740ea685d07"}, + {file = "debugpy-1.6.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279d64c408c60431c8ee832dfd9ace7c396984fd7341fa3116aee414e7dcd88d"}, + {file = "debugpy-1.6.7-cp37-cp37m-win32.whl", hash = "sha256:dbe04e7568aa69361a5b4c47b4493d5680bfa3a911d1e105fbea1b1f23f3eb45"}, + {file = "debugpy-1.6.7-cp37-cp37m-win_amd64.whl", hash = "sha256:f90a2d4ad9a035cee7331c06a4cf2245e38bd7c89554fe3b616d90ab8aab89cc"}, + {file = "debugpy-1.6.7-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:5224eabbbeddcf1943d4e2821876f3e5d7d383f27390b82da5d9558fd4eb30a9"}, + {file = "debugpy-1.6.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae1123dff5bfe548ba1683eb972329ba6d646c3a80e6b4c06cd1b1dd0205e9b"}, + {file = "debugpy-1.6.7-cp38-cp38-win32.whl", hash = "sha256:9cd10cf338e0907fdcf9eac9087faa30f150ef5445af5a545d307055141dd7a4"}, + {file = "debugpy-1.6.7-cp38-cp38-win_amd64.whl", hash = "sha256:aaf6da50377ff4056c8ed470da24632b42e4087bc826845daad7af211e00faad"}, + {file = "debugpy-1.6.7-cp39-cp39-macosx_11_0_x86_64.whl", 
hash = "sha256:0679b7e1e3523bd7d7869447ec67b59728675aadfc038550a63a362b63029d2c"}, + {file = "debugpy-1.6.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de86029696e1b3b4d0d49076b9eba606c226e33ae312a57a46dca14ff370894d"}, + {file = "debugpy-1.6.7-cp39-cp39-win32.whl", hash = "sha256:d71b31117779d9a90b745720c0eab54ae1da76d5b38c8026c654f4a066b0130a"}, + {file = "debugpy-1.6.7-cp39-cp39-win_amd64.whl", hash = "sha256:c0ff93ae90a03b06d85b2c529eca51ab15457868a377c4cc40a23ab0e4e552a3"}, + {file = "debugpy-1.6.7-py2.py3-none-any.whl", hash = "sha256:53f7a456bc50706a0eaabecf2d3ce44c4d5010e46dfc65b6b81a518b42866267"}, + {file = "debugpy-1.6.7.zip", hash = "sha256:c4c2f0810fa25323abfdfa36cbbbb24e5c3b1a42cb762782de64439c575d67f2"}, +] decorator = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +defusedxml = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] docker-pycreds = [ {file = "docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4"}, {file = "docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49"}, ] +exceptiongroup = [ + {file = "exceptiongroup-1.1.2-py3-none-any.whl", hash = "sha256:e346e69d186172ca7cf029c8c1d16235aa0e04035e5750b4b95039e65204328f"}, + {file = "exceptiongroup-1.1.2.tar.gz", hash = "sha256:12c3e887d6485d16943a309616de20ae5582633e0a2eda17f4e10fd61c1e8af5"}, +] +executing = [ + {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"}, + {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"}, +] +fastjsonschema = [ + {file = "fastjsonschema-2.17.1-py3-none-any.whl", hash = "sha256:4b90b252628ca695280924d863fe37234eebadc29c5360d322571233dc9746e0"}, + {file = "fastjsonschema-2.17.1.tar.gz", hash = "sha256:f4eeb8a77cef54861dbf7424ac8ce71306f12cbb086c45131bcba2c6a4f726e3"}, +] filelock = [ {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"}, {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"}, @@ -948,6 +2128,10 @@ fonttools = [ {file = "fonttools-4.40.0-py3-none-any.whl", hash = "sha256:200729d12461e2038700d31f0d49ad5a7b55855dec7525074979a06b46f88505"}, {file = "fonttools-4.40.0.tar.gz", hash = "sha256:337b6e83d7ee73c40ea62407f2ce03b07c3459e213b6f332b94a69923b9e1cb9"}, ] +fqdn = [ + {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, + {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, +] gitdb = [ {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, @@ -1045,10 +2229,78 @@ idna = [ {file = "idna-3.4-py3-none-any.whl", hash = 
"sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] +ipykernel = [ + {file = "ipykernel-6.24.0-py3-none-any.whl", hash = "sha256:2f5fffc7ad8f1fd5aadb4e171ba9129d9668dbafa374732cf9511ada52d6547f"}, + {file = "ipykernel-6.24.0.tar.gz", hash = "sha256:29cea0a716b1176d002a61d0b0c851f34536495bc4ef7dd0222c88b41b816123"}, +] +ipython = [ + {file = "ipython-8.14.0-py3-none-any.whl", hash = "sha256:248aca623f5c99a6635bc3857677b7320b9b8039f99f070ee0d20a5ca5a8e6bf"}, + {file = "ipython-8.14.0.tar.gz", hash = "sha256:1d197b907b6ba441b692c48cf2a3a2de280dc0ac91a3405b39349a50272ca0a1"}, +] +isoduration = [ + {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, + {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, +] +jedi = [ + {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"}, + {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"}, +] jinja2 = [ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, ] +json5 = [ + {file = "json5-0.9.14-py2.py3-none-any.whl", hash = "sha256:740c7f1b9e584a468dbb2939d8d458db3427f2c93ae2139d05f47e453eae964f"}, + {file = "json5-0.9.14.tar.gz", hash = "sha256:9ed66c3a6ca3510a976a9ef9b8c0787de24802724ab1860bc0153c7fdd589b02"}, +] +jsonpointer = [ + {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, +] +jsonschema = [ + {file = "jsonschema-4.18.0-py3-none-any.whl", hash = "sha256:b508dd6142bd03f4c3670534c80af68cd7bbff9ea830b9cf2625d4a3c49ddf60"}, + {file = "jsonschema-4.18.0.tar.gz", hash = "sha256:8caf5b57a990a98e9b39832ef3cb35c176fe331414252b6e1b26fd5866f891a4"}, +] +jsonschema-specifications = [ + {file = "jsonschema_specifications-2023.6.1-py3-none-any.whl", hash = "sha256:3d2b82663aff01815f744bb5c7887e2121a63399b49b104a3c96145474d091d7"}, + {file = "jsonschema_specifications-2023.6.1.tar.gz", hash = "sha256:ca1c4dd059a9e7b34101cf5b3ab7ff1d18b139f35950d598d629837ef66e8f28"}, +] +jupyter-client = [ + {file = "jupyter_client-8.3.0-py3-none-any.whl", hash = "sha256:7441af0c0672edc5d28035e92ba5e32fadcfa8a4e608a434c228836a89df6158"}, + {file = "jupyter_client-8.3.0.tar.gz", hash = "sha256:3af69921fe99617be1670399a0b857ad67275eefcfa291e2c81a160b7b650f5f"}, +] +jupyter-core = [ + {file = "jupyter_core-5.3.1-py3-none-any.whl", hash = "sha256:ae9036db959a71ec1cac33081eeb040a79e681f08ab68b0883e9a676c7a90dce"}, + {file = "jupyter_core-5.3.1.tar.gz", hash = "sha256:5ba5c7938a7f97a6b0481463f7ff0dbac7c15ba48cf46fa4035ca6e838aa1aba"}, +] +jupyter-events = [ + {file = "jupyter_events-0.6.3-py3-none-any.whl", hash = "sha256:57a2749f87ba387cd1bfd9b22a0875b889237dbf2edc2121ebb22bde47036c17"}, + {file = "jupyter_events-0.6.3.tar.gz", hash = "sha256:9a6e9995f75d1b7146b436ea24d696ce3a35bfa8bfe45e0c33c334c79464d0b3"}, +] +jupyter-lsp = [ + {file = "jupyter-lsp-2.2.0.tar.gz", hash = 
"sha256:8ebbcb533adb41e5d635eb8fe82956b0aafbf0fd443b6c4bfa906edeeb8635a1"}, + {file = "jupyter_lsp-2.2.0-py3-none-any.whl", hash = "sha256:9e06b8b4f7dd50300b70dd1a78c0c3b0c3d8fa68e0f2d8a5d1fbab62072aca3f"}, +] +jupyter-server = [ + {file = "jupyter_server-2.7.0-py3-none-any.whl", hash = "sha256:6a77912aff643e53fa14bdb2634884b52b784a4be77ce8e93f7283faed0f0849"}, + {file = "jupyter_server-2.7.0.tar.gz", hash = "sha256:36da0a266d31a41ac335a366c88933c17dfa5bb817a48f5c02c16d303bc9477f"}, +] +jupyter-server-terminals = [ + {file = "jupyter_server_terminals-0.4.4-py3-none-any.whl", hash = "sha256:75779164661cec02a8758a5311e18bb8eb70c4e86c6b699403100f1585a12a36"}, + {file = "jupyter_server_terminals-0.4.4.tar.gz", hash = "sha256:57ab779797c25a7ba68e97bcfb5d7740f2b5e8a83b5e8102b10438041a7eac5d"}, +] +jupyterlab = [ + {file = "jupyterlab-4.0.2-py3-none-any.whl", hash = "sha256:201b4f729a7dc5e22ca6c4dd8944cde792f1cb008d7c6b821e0a48d2502205c8"}, + {file = "jupyterlab-4.0.2.tar.gz", hash = "sha256:0a77898aebb55da391e5f57022774c089fb075e98803ff3d514a79b727dc934d"}, +] +jupyterlab-pygments = [ + {file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"}, + {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, +] +jupyterlab-server = [ + {file = "jupyterlab_server-2.23.0-py3-none-any.whl", hash = "sha256:a5ea2c839336a8ba7c38c8e7b2f24cedf919f0d439f4d2e606d9322013a95788"}, + {file = "jupyterlab_server-2.23.0.tar.gz", hash = "sha256:83c01aa4ad9451cd61b383e634d939ff713850f4640c0056b2cdb2b6211a74c7"}, +] kiwisolver = [ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"}, {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"}, @@ -1293,10 +2545,30 @@ matplotlib = [ {file = "matplotlib-3.7.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:97cc368a7268141afb5690760921765ed34867ffb9655dd325ed207af85c7529"}, {file = "matplotlib-3.7.1.tar.gz", hash = "sha256:7b73305f25eab4541bd7ee0b96d87e53ae9c9f1823be5659b806cd85786fe882"}, ] +matplotlib-inline = [ + {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, + {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, +] +mistune = [ + {file = "mistune-3.0.1-py3-none-any.whl", hash = "sha256:b9b3e438efbb57c62b5beb5e134dab664800bdf1284a7ee09e8b12b13eb1aac6"}, + {file = "mistune-3.0.1.tar.gz", hash = "sha256:e912116c13aa0944f9dc530db38eb88f6a77087ab128f49f84a48f4c05ea163c"}, +] mpmath = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, ] +nbclient = [ + {file = "nbclient-0.8.0-py3-none-any.whl", hash = "sha256:25e861299e5303a0477568557c4045eccc7a34c17fc08e7959558707b9ebe548"}, + {file = "nbclient-0.8.0.tar.gz", hash = "sha256:f9b179cd4b2d7bca965f900a2ebf0db4a12ebff2f36a711cb66861e4ae158e55"}, +] +nbconvert = [ + {file = "nbconvert-7.6.0-py3-none-any.whl", hash = "sha256:5a445c6794b0791984bc5436608fe2c066cb43c83920c7bc91bde3b765e9a264"}, + {file = "nbconvert-7.6.0.tar.gz", hash = 
"sha256:24fcf27efdef2b51d7f090cc5ce5a9b178766a55be513c4ebab08c91899ab550"}, +] +nbformat = [ + {file = "nbformat-5.9.1-py3-none-any.whl", hash = "sha256:b7968ebf4811178a4108ee837eae1442e3f054132100f0359219e9ed1ce3ca45"}, + {file = "nbformat-5.9.1.tar.gz", hash = "sha256:3a7f52d040639cbd8a3890218c8b0ffb93211588c57446c90095e32ba5881b5d"}, +] nest-asyncio = [ {file = "nest_asyncio-1.5.6-py3-none-any.whl", hash = "sha256:b9a953fb40dceaa587d109609098db21900182b16440652454a146cffb06e8b8"}, {file = "nest_asyncio-1.5.6.tar.gz", hash = "sha256:d267cc1ff794403f7df692964d1d2a3fa9418ffea2a3f6859a439ff482fef290"}, @@ -1305,6 +2577,10 @@ networkx = [ {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, ] +notebook-shim = [ + {file = "notebook_shim-0.2.3-py3-none-any.whl", hash = "sha256:a83496a43341c1674b093bfcebf0fe8e74cbe7eda5fd2bbc56f8e39e1486c0c7"}, + {file = "notebook_shim-0.2.3.tar.gz", hash = "sha256:f69388ac283ae008cd506dda10d0288b09a017d822d5e8c7129a152cbd3ce7e9"}, +] numpy = [ {file = "numpy-1.25.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8aa130c3042052d656751df5e81f6d61edff3e289b5994edcf77f54118a8d9f4"}, {file = "numpy-1.25.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e3f2b96e3b63c978bc29daaa3700c028fe3f049ea3031b58aa33fe2a5809d24"}, @@ -1346,6 +2622,10 @@ obspy = [ {file = "obspy-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:2090a95b08b214575892c3d99bb3362b13a3b0f4689d4ee55f95ea4d8a2cbc26"}, {file = "obspy-1.4.0.tar.gz", hash = "sha256:336a6e1d9a485732b08173cb5dc1dd720a8e53f3b54c180a62bb8ceaa5fe5c06"}, ] +overrides = [ + {file = "overrides-7.3.1-py3-none-any.whl", hash = "sha256:6187d8710a935d09b0bcef8238301d6ee2569d2ac1ae0ec39a8c7924e27f58ca"}, + {file = "overrides-7.3.1.tar.gz", hash = "sha256:8b97c6c1e1681b78cbc9424b138d880f0803c2254c5ebaabdde57bb6c62093f2"}, +] packaging = [ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, @@ -1377,9 +2657,25 @@ pandas = [ {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, ] +pandocfilters = [ + {file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, + {file = "pandocfilters-1.5.0.tar.gz", hash = "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38"}, +] +parso = [ + {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, + {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, +] pathtools = [ {file = "pathtools-0.1.2.tar.gz", hash = "sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0"}, ] +pexpect = [ + {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, + {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, +] +pickleshare = [ + {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = 
"sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, + {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, +] pillow = [ {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, @@ -1436,6 +2732,18 @@ pillow = [ {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, ] +platformdirs = [ + {file = "platformdirs-3.8.1-py3-none-any.whl", hash = "sha256:cec7b889196b9144d088e4c57d9ceef7374f6c39694ad1577a0aab50d27ea28c"}, + {file = "platformdirs-3.8.1.tar.gz", hash = "sha256:f87ca4fcff7d2b0f81c6a748a77973d7af0f4d526f98f308477c3c436c74d528"}, +] +prometheus-client = [ + {file = "prometheus_client-0.17.1-py3-none-any.whl", hash = "sha256:e537f37160f6807b8202a6fc4764cdd19bac5480ddd3e0d463c3002b34462101"}, + {file = "prometheus_client-0.17.1.tar.gz", hash = "sha256:21e674f39831ae3f8acde238afd9a27a37d0d2fb5a28ea094f0ce25d2cbf2091"}, +] +prompt-toolkit = [ + {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"}, + {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"}, +] protobuf = [ {file = "protobuf-4.23.3-cp310-abi3-win32.whl", hash = "sha256:514b6bbd54a41ca50c86dd5ad6488afe9505901b3557c5e0f7823a0cf67106fb"}, {file = "protobuf-4.23.3-cp310-abi3-win_amd64.whl", hash = "sha256:cc14358a8742c4e06b1bfe4be1afbdf5c9f6bd094dff3e14edb78a1513893ff5"}, @@ -1467,6 +2775,22 @@ psutil = [ {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, ] +ptyprocess = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] +pure-eval = [ + {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, + {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, +] +pycparser = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] +pygments = [ + {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, + {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, +] pyparsing = [ {file = "pyparsing-3.1.0-py3-none-any.whl", hash = "sha256:d554a96d1a7d3ddaf7183104485bc19fd80543ad6ac5bdb6426719d766fb06c1"}, {file = "pyparsing-3.1.0.tar.gz", hash = "sha256:edb662d6fe322d6e990b1594b5feaeadf806803359e3d4d42f11e295e588f0ea"}, @@ -1479,10 
+2803,38 @@ python-dotenv = [ {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, ] +python-json-logger = [ + {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, + {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, +] pytz = [ {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, ] +pywin32 = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] +pywinpty = [ + {file = "pywinpty-2.0.10-cp310-none-win_amd64.whl", hash = "sha256:4c7d06ad10f6e92bc850a467f26d98f4f30e73d2fe5926536308c6ae0566bc16"}, + {file = "pywinpty-2.0.10-cp311-none-win_amd64.whl", hash = "sha256:7ffbd66310b83e42028fc9df7746118978d94fba8c1ebf15a7c1275fdd80b28a"}, + {file = "pywinpty-2.0.10-cp37-none-win_amd64.whl", hash = "sha256:38cb924f2778b5751ef91a75febd114776b3af0ae411bc667be45dd84fc881d3"}, + {file = "pywinpty-2.0.10-cp38-none-win_amd64.whl", hash = "sha256:902d79444b29ad1833b8d5c3c9aabdfd428f4f068504430df18074007c8c0de8"}, + {file = "pywinpty-2.0.10-cp39-none-win_amd64.whl", hash = "sha256:3c46aef80dd50979aff93de199e4a00a8ee033ba7a03cadf0a91fed45f0c39d7"}, + {file = "pywinpty-2.0.10.tar.gz", hash = "sha256:cdbb5694cf8c7242c2ecfaca35c545d31fa5d5814c3d67a4e628f803f680ebea"}, +] pyyaml = [ {file = 
"PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, @@ -1525,10 +2877,200 @@ pyyaml = [ {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, ] +pyzmq = [ + {file = "pyzmq-25.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:1a6169e69034eaa06823da6a93a7739ff38716142b3596c180363dee729d713d"}, + {file = "pyzmq-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:19d0383b1f18411d137d891cab567de9afa609b214de68b86e20173dc624c101"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1e931d9a92f628858a50f5bdffdfcf839aebe388b82f9d2ccd5d22a38a789dc"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97d984b1b2f574bc1bb58296d3c0b64b10e95e7026f8716ed6c0b86d4679843f"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:154bddda2a351161474b36dba03bf1463377ec226a13458725183e508840df89"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cb6d161ae94fb35bb518b74bb06b7293299c15ba3bc099dccd6a5b7ae589aee3"}, + {file = "pyzmq-25.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:90146ab578931e0e2826ee39d0c948d0ea72734378f1898939d18bc9c823fcf9"}, + {file = "pyzmq-25.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:831ba20b660b39e39e5ac8603e8193f8fce1ee03a42c84ade89c36a251449d80"}, + {file = "pyzmq-25.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3a522510e3434e12aff80187144c6df556bb06fe6b9d01b2ecfbd2b5bfa5c60c"}, + {file = "pyzmq-25.1.0-cp310-cp310-win32.whl", hash = "sha256:be24a5867b8e3b9dd5c241de359a9a5217698ff616ac2daa47713ba2ebe30ad1"}, + {file = "pyzmq-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:5693dcc4f163481cf79e98cf2d7995c60e43809e325b77a7748d8024b1b7bcba"}, + {file = "pyzmq-25.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:13bbe36da3f8aaf2b7ec12696253c0bf6ffe05f4507985a8844a1081db6ec22d"}, + {file = "pyzmq-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:69511d604368f3dc58d4be1b0bad99b61ee92b44afe1cd9b7bd8c5e34ea8248a"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a983c8694667fd76d793ada77fd36c8317e76aa66eec75be2653cef2ea72883"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:332616f95eb400492103ab9d542b69d5f0ff628b23129a4bc0a2fd48da6e4e0b"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58416db767787aedbfd57116714aad6c9ce57215ffa1c3758a52403f7c68cff5"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cad9545f5801a125f162d09ec9b724b7ad9b6440151b89645241d0120e119dcc"}, + {file = "pyzmq-25.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d6128d431b8dfa888bf51c22a04d48bcb3d64431caf02b3cb943269f17fd2994"}, + {file = "pyzmq-25.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2b15247c49d8cbea695b321ae5478d47cffd496a2ec5ef47131a9e79ddd7e46c"}, + {file = "pyzmq-25.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:442d3efc77ca4d35bee3547a8e08e8d4bb88dadb54a8377014938ba98d2e074a"}, + {file = "pyzmq-25.1.0-cp311-cp311-win32.whl", hash = "sha256:65346f507a815a731092421d0d7d60ed551a80d9b75e8b684307d435a5597425"}, + {file = "pyzmq-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b45d722046fea5a5694cba5d86f21f78f0052b40a4bbbbf60128ac55bfcc7b6"}, + {file = "pyzmq-25.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f45808eda8b1d71308c5416ef3abe958f033fdbb356984fabbfc7887bed76b3f"}, + {file = "pyzmq-25.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b697774ea8273e3c0460cf0bba16cd85ca6c46dfe8b303211816d68c492e132"}, + {file = "pyzmq-25.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b324fa769577fc2c8f5efcd429cef5acbc17d63fe15ed16d6dcbac2c5eb00849"}, + {file = "pyzmq-25.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:5873d6a60b778848ce23b6c0ac26c39e48969823882f607516b91fb323ce80e5"}, + {file = "pyzmq-25.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:f0d9e7ba6a815a12c8575ba7887da4b72483e4cfc57179af10c9b937f3f9308f"}, + {file = "pyzmq-25.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:414b8beec76521358b49170db7b9967d6974bdfc3297f47f7d23edec37329b00"}, + {file = "pyzmq-25.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:01f06f33e12497dca86353c354461f75275a5ad9eaea181ac0dc1662da8074fa"}, + {file = "pyzmq-25.1.0-cp36-cp36m-win32.whl", hash = "sha256:b5a07c4f29bf7cb0164664ef87e4aa25435dcc1f818d29842118b0ac1eb8e2b5"}, + {file = "pyzmq-25.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:968b0c737797c1809ec602e082cb63e9824ff2329275336bb88bd71591e94a90"}, + {file = "pyzmq-25.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47b915ba666c51391836d7ed9a745926b22c434efa76c119f77bcffa64d2c50c"}, + {file = "pyzmq-25.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5af31493663cf76dd36b00dafbc839e83bbca8a0662931e11816d75f36155897"}, + {file = "pyzmq-25.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5489738a692bc7ee9a0a7765979c8a572520d616d12d949eaffc6e061b82b4d1"}, + {file = "pyzmq-25.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1fc56a0221bdf67cfa94ef2d6ce5513a3d209c3dfd21fed4d4e87eca1822e3a3"}, + {file = "pyzmq-25.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:75217e83faea9edbc29516fc90c817bc40c6b21a5771ecb53e868e45594826b0"}, + {file = "pyzmq-25.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3830be8826639d801de9053cf86350ed6742c4321ba4236e4b5568528d7bfed7"}, + {file = "pyzmq-25.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3575699d7fd7c9b2108bc1c6128641a9a825a58577775ada26c02eb29e09c517"}, + {file = "pyzmq-25.1.0-cp37-cp37m-win32.whl", hash = "sha256:95bd3a998d8c68b76679f6b18f520904af5204f089beebb7b0301d97704634dd"}, + {file = "pyzmq-25.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:dbc466744a2db4b7ca05589f21ae1a35066afada2f803f92369f5877c100ef62"}, + {file = "pyzmq-25.1.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:3bed53f7218490c68f0e82a29c92335daa9606216e51c64f37b48eb78f1281f4"}, + {file = "pyzmq-25.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eb52e826d16c09ef87132c6e360e1879c984f19a4f62d8a935345deac43f3c12"}, + {file = "pyzmq-25.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ddbef8b53cd16467fdbfa92a712eae46dd066aa19780681a2ce266e88fbc7165"}, + {file = "pyzmq-25.1.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:9301cf1d7fc1ddf668d0abbe3e227fc9ab15bc036a31c247276012abb921b5ff"}, + {file = "pyzmq-25.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e23a8c3b6c06de40bdb9e06288180d630b562db8ac199e8cc535af81f90e64b"}, + {file = "pyzmq-25.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4a82faae00d1eed4809c2f18b37f15ce39a10a1c58fe48b60ad02875d6e13d80"}, + {file = "pyzmq-25.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c8398a1b1951aaa330269c35335ae69744be166e67e0ebd9869bdc09426f3871"}, + {file = "pyzmq-25.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d40682ac60b2a613d36d8d3a0cd14fbdf8e7e0618fbb40aa9fa7b796c9081584"}, + {file = "pyzmq-25.1.0-cp38-cp38-win32.whl", hash = "sha256:33d5c8391a34d56224bccf74f458d82fc6e24b3213fc68165c98b708c7a69325"}, + {file = "pyzmq-25.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:c66b7ff2527e18554030319b1376d81560ca0742c6e0b17ff1ee96624a5f1afd"}, + {file = "pyzmq-25.1.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:af56229ea6527a849ac9fb154a059d7e32e77a8cba27e3e62a1e38d8808cb1a5"}, + {file = "pyzmq-25.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdca18b94c404af6ae5533cd1bc310c4931f7ac97c148bbfd2cd4bdd62b96253"}, + {file = "pyzmq-25.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b6b42f7055bbc562f63f3df3b63e3dd1ebe9727ff0f124c3aa7bcea7b3a00f9"}, + {file = "pyzmq-25.1.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4c2fc7aad520a97d64ffc98190fce6b64152bde57a10c704b337082679e74f67"}, + {file = "pyzmq-25.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be86a26415a8b6af02cd8d782e3a9ae3872140a057f1cadf0133de685185c02b"}, + {file = "pyzmq-25.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:851fb2fe14036cfc1960d806628b80276af5424db09fe5c91c726890c8e6d943"}, + {file = "pyzmq-25.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2a21fec5c3cea45421a19ccbe6250c82f97af4175bc09de4d6dd78fb0cb4c200"}, + {file = "pyzmq-25.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bad172aba822444b32eae54c2d5ab18cd7dee9814fd5c7ed026603b8cae2d05f"}, + {file = "pyzmq-25.1.0-cp39-cp39-win32.whl", hash = "sha256:4d67609b37204acad3d566bb7391e0ecc25ef8bae22ff72ebe2ad7ffb7847158"}, + {file = "pyzmq-25.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:71c7b5896e40720d30cd77a81e62b433b981005bbff0cb2f739e0f8d059b5d99"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4cb27ef9d3bdc0c195b2dc54fcb8720e18b741624686a81942e14c8b67cc61a6"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0c4fc2741e0513b5d5a12fe200d6785bbcc621f6f2278893a9ca7bed7f2efb7d"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fc34fdd458ff77a2a00e3c86f899911f6f269d393ca5675842a6e92eea565bae"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8751f9c1442624da391bbd92bd4b072def6d7702a9390e4479f45c182392ff78"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:6581e886aec3135964a302a0f5eb68f964869b9efd1dbafdebceaaf2934f8a68"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5482f08d2c3c42b920e8771ae8932fbaa0a67dff925fc476996ddd8155a170f3"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7fbcafa3ea16d1de1f213c226005fea21ee16ed56134b75b2dede5a2129e62"}, + {file = 
"pyzmq-25.1.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:adecf6d02b1beab8d7c04bc36f22bb0e4c65a35eb0b4750b91693631d4081c70"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6d39e42a0aa888122d1beb8ec0d4ddfb6c6b45aecb5ba4013c27e2f28657765"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7018289b402ebf2b2c06992813523de61d4ce17bd514c4339d8f27a6f6809492"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9e68ae9864d260b18f311b68d29134d8776d82e7f5d75ce898b40a88df9db30f"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e21cc00e4debe8f54c3ed7b9fcca540f46eee12762a9fa56feb8512fd9057161"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f666ae327a6899ff560d741681fdcdf4506f990595201ed39b44278c471ad98"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f5efcc29056dfe95e9c9db0dfbb12b62db9c4ad302f812931b6d21dd04a9119"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:48e5e59e77c1a83162ab3c163fc01cd2eebc5b34560341a67421b09be0891287"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:108c96ebbd573d929740d66e4c3d1bdf31d5cde003b8dc7811a3c8c5b0fc173b"}, + {file = "pyzmq-25.1.0.tar.gz", hash = "sha256:80c41023465d36280e801564a69cbfce8ae85ff79b080e1913f6e90481fb8957"}, +] +referencing = [ + {file = "referencing-0.29.1-py3-none-any.whl", hash = "sha256:d3c8f323ee1480095da44d55917cfb8278d73d6b4d5f677e3e40eb21314ac67f"}, + {file = "referencing-0.29.1.tar.gz", hash = "sha256:90cb53782d550ba28d2166ef3f55731f38397def8832baac5d45235f1995e35e"}, +] requests = [ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, ] +rfc3339-validator = [ + {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, + {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, +] +rfc3986-validator = [ + {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, + {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, +] +rpds-py = [ + {file = "rpds_py-0.8.10-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:93d06cccae15b3836247319eee7b6f1fdcd6c10dabb4e6d350d27bd0bdca2711"}, + {file = "rpds_py-0.8.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3816a890a6a9e9f1de250afa12ca71c9a7a62f2b715a29af6aaee3aea112c181"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7c6304b894546b5a6bdc0fe15761fa53fe87d28527a7142dae8de3c663853e1"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad3bfb44c8840fb4be719dc58e229f435e227fbfbe133dc33f34981ff622a8f8"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14f1c356712f66653b777ecd8819804781b23dbbac4eade4366b94944c9e78ad"}, + {file = 
"rpds_py-0.8.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82bb361cae4d0a627006dadd69dc2f36b7ad5dc1367af9d02e296ec565248b5b"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2e3c4f2a8e3da47f850d7ea0d7d56720f0f091d66add889056098c4b2fd576c"}, + {file = "rpds_py-0.8.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15a90d0ac11b4499171067ae40a220d1ca3cb685ec0acc356d8f3800e07e4cb8"}, + {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:70bb9c8004b97b4ef7ae56a2aa56dfaa74734a0987c78e7e85f00004ab9bf2d0"}, + {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d64f9f88d5203274a002b54442cafc9c7a1abff2a238f3e767b70aadf919b451"}, + {file = "rpds_py-0.8.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ccbbd276642788c4376fbe8d4e6c50f0fb4972ce09ecb051509062915891cbf0"}, + {file = "rpds_py-0.8.10-cp310-none-win32.whl", hash = "sha256:fafc0049add8043ad07ab5382ee80d80ed7e3699847f26c9a5cf4d3714d96a84"}, + {file = "rpds_py-0.8.10-cp310-none-win_amd64.whl", hash = "sha256:915031002c86a5add7c6fd4beb601b2415e8a1c956590a5f91d825858e92fe6e"}, + {file = "rpds_py-0.8.10-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:84eb541a44f7a18f07a6bfc48b95240739e93defe1fdfb4f2a295f37837945d7"}, + {file = "rpds_py-0.8.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f59996d0550894affaad8743e97b9b9c98f638b221fac12909210ec3d9294786"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9adb5664b78fcfcd830000416c8cc69853ef43cb084d645b3f1f0296edd9bae"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f96f3f98fbff7af29e9edf9a6584f3c1382e7788783d07ba3721790625caa43e"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:376b8de737401050bd12810003d207e824380be58810c031f10ec563ff6aef3d"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1c2bc319428d50b3e0fa6b673ab8cc7fa2755a92898db3a594cbc4eeb6d1f7"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a1e48430f418f0ac3dfd87860e4cc0d33ad6c0f589099a298cb53724db1169"}, + {file = "rpds_py-0.8.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134ec8f14ca7dbc6d9ae34dac632cdd60939fe3734b5d287a69683c037c51acb"}, + {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4b519bac7c09444dd85280fd60f28c6dde4389c88dddf4279ba9b630aca3bbbe"}, + {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9cd57981d9fab04fc74438d82460f057a2419974d69a96b06a440822d693b3c0"}, + {file = "rpds_py-0.8.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69d089c026f6a8b9d64a06ff67dc3be196707b699d7f6ca930c25f00cf5e30d8"}, + {file = "rpds_py-0.8.10-cp311-none-win32.whl", hash = "sha256:220bdcad2d2936f674650d304e20ac480a3ce88a40fe56cd084b5780f1d104d9"}, + {file = "rpds_py-0.8.10-cp311-none-win_amd64.whl", hash = "sha256:6c6a0225b8501d881b32ebf3f5807a08ad3685b5eb5f0a6bfffd3a6e039b2055"}, + {file = "rpds_py-0.8.10-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e3d0cd3dff0e7638a7b5390f3a53057c4e347f4ef122ee84ed93fc2fb7ea4aa2"}, + {file = "rpds_py-0.8.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d77dff3a5aa5eedcc3da0ebd10ff8e4969bc9541aa3333a8d41715b429e99f47"}, + {file = 
"rpds_py-0.8.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41c89a366eae49ad9e65ed443a8f94aee762931a1e3723749d72aeac80f5ef2f"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3793c21494bad1373da517001d0849eea322e9a049a0e4789e50d8d1329df8e7"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:805a5f3f05d186c5d50de2e26f765ba7896d0cc1ac5b14ffc36fae36df5d2f10"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b01b39ad5411563031ea3977bbbc7324d82b088e802339e6296f082f78f6115c"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f1e860be21f3e83011116a65e7310486300e08d9a3028e73e8d13bb6c77292"}, + {file = "rpds_py-0.8.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a13c8e56c46474cd5958d525ce6a9996727a83d9335684e41f5192c83deb6c58"}, + {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93d99f957a300d7a4ced41615c45aeb0343bb8f067c42b770b505de67a132346"}, + {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:148b0b38d719c0760e31ce9285a9872972bdd7774969a4154f40c980e5beaca7"}, + {file = "rpds_py-0.8.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3cc5e5b5514796f45f03a568981971b12a3570f3de2e76114f7dc18d4b60a3c4"}, + {file = "rpds_py-0.8.10-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e8e24b210a4deb5a7744971f8f77393005bae7f873568e37dfd9effe808be7f7"}, + {file = "rpds_py-0.8.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b41941583adce4242af003d2a8337b066ba6148ca435f295f31ac6d9e4ea2722"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c490204e16bca4f835dba8467869fe7295cdeaa096e4c5a7af97f3454a97991"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ee45cd1d84beed6cbebc839fd85c2e70a3a1325c8cfd16b62c96e2ffb565eca"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a8ca409f1252e1220bf09c57290b76cae2f14723746215a1e0506472ebd7bdf"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96b293c0498c70162effb13100624c5863797d99df75f2f647438bd10cbf73e4"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4627520a02fccbd324b33c7a83e5d7906ec746e1083a9ac93c41ac7d15548c7"}, + {file = "rpds_py-0.8.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e39d7ab0c18ac99955b36cd19f43926450baba21e3250f053e0704d6ffd76873"}, + {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ba9f1d1ebe4b63801977cec7401f2d41e888128ae40b5441270d43140efcad52"}, + {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:802f42200d8caf7f25bbb2a6464cbd83e69d600151b7e3b49f49a47fa56b0a38"}, + {file = "rpds_py-0.8.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d19db6ba816e7f59fc806c690918da80a7d186f00247048cd833acdab9b4847b"}, + {file = "rpds_py-0.8.10-cp38-none-win32.whl", hash = "sha256:7947e6e2c2ad68b1c12ee797d15e5f8d0db36331200b0346871492784083b0c6"}, + {file = "rpds_py-0.8.10-cp38-none-win_amd64.whl", hash = "sha256:fa326b3505d5784436d9433b7980171ab2375535d93dd63fbcd20af2b5ca1bb6"}, + {file = "rpds_py-0.8.10-cp39-cp39-macosx_10_7_x86_64.whl", hash = 
"sha256:7b38a9ac96eeb6613e7f312cd0014de64c3f07000e8bf0004ad6ec153bac46f8"}, + {file = "rpds_py-0.8.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c4d42e83ddbf3445e6514f0aff96dca511421ed0392d9977d3990d9f1ba6753c"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b21575031478609db6dbd1f0465e739fe0e7f424a8e7e87610a6c7f68b4eb16"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:574868858a7ff6011192c023a5289158ed20e3f3b94b54f97210a773f2f22921"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae40f4a70a1f40939d66ecbaf8e7edc144fded190c4a45898a8cfe19d8fc85ea"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37f7ee4dc86db7af3bac6d2a2cedbecb8e57ce4ed081f6464510e537589f8b1e"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:695f642a3a5dbd4ad2ffbbacf784716ecd87f1b7a460843b9ddf965ccaeafff4"}, + {file = "rpds_py-0.8.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f43ab4cb04bde6109eb2555528a64dfd8a265cc6a9920a67dcbde13ef53a46c8"}, + {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a11ab0d97be374efd04f640c04fe5c2d3dabc6dfb998954ea946ee3aec97056d"}, + {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:92cf5b3ee60eef41f41e1a2cabca466846fb22f37fc580ffbcb934d1bcab225a"}, + {file = "rpds_py-0.8.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ceaac0c603bf5ac2f505a78b2dcab78d3e6b706be6596c8364b64cc613d208d2"}, + {file = "rpds_py-0.8.10-cp39-none-win32.whl", hash = "sha256:dd4f16e57c12c0ae17606c53d1b57d8d1c8792efe3f065a37cb3341340599d49"}, + {file = "rpds_py-0.8.10-cp39-none-win_amd64.whl", hash = "sha256:c03a435d26c3999c2a8642cecad5d1c4d10c961817536af52035f6f4ee2f5dd0"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0da53292edafecba5e1d8c1218f99babf2ed0bf1c791d83c0ab5c29b57223068"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d20a8ed227683401cc508e7be58cba90cc97f784ea8b039c8cd01111e6043e0"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97cab733d303252f7c2f7052bf021a3469d764fc2b65e6dbef5af3cbf89d4892"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8c398fda6df361a30935ab4c4bccb7f7a3daef2964ca237f607c90e9f3fdf66f"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2eb4b08c45f8f8d8254cdbfacd3fc5d6b415d64487fb30d7380b0d0569837bf1"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7dfb1cbb895810fa2b892b68153c17716c6abaa22c7dc2b2f6dcf3364932a1c"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c92b74e8bf6f53a6f4995fd52f4bd510c12f103ee62c99e22bc9e05d45583c"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e9c0683cb35a9b5881b41bc01d5568ffc667910d9dbc632a1fba4e7d59e98773"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0eeb2731708207d0fe2619afe6c4dc8cb9798f7de052da891de5f19c0006c315"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:7495010b658ec5b52835f21d8c8b1a7e52e194c50f095d4223c0b96c3da704b1"}, + {file = "rpds_py-0.8.10-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c72ebc22e70e04126158c46ba56b85372bc4d54d00d296be060b0db1671638a4"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2cd3045e7f6375dda64ed7db1c5136826facb0159ea982f77d9cf6125025bd34"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2418cf17d653d24ffb8b75e81f9f60b7ba1b009a23298a433a4720b2a0a17017"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a2edf8173ac0c7a19da21bc68818be1321998528b5e3f748d6ee90c0ba2a1fd"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f29b8c55fd3a2bc48e485e37c4e2df3317f43b5cc6c4b6631c33726f52ffbb3"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a7d20c1cf8d7b3960c5072c265ec47b3f72a0c608a9a6ee0103189b4f28d531"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:521fc8861a86ae54359edf53a15a05fabc10593cea7b3357574132f8427a5e5a"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5c191713e98e7c28800233f039a32a42c1a4f9a001a8a0f2448b07391881036"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:083df0fafe199371206111583c686c985dddaf95ab3ee8e7b24f1fda54515d09"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ed41f3f49507936a6fe7003985ea2574daccfef999775525d79eb67344e23767"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:2614c2732bf45de5c7f9e9e54e18bc78693fa2f635ae58d2895b7965e470378c"}, + {file = "rpds_py-0.8.10-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c60528671d9d467009a6ec284582179f6b88651e83367d0ab54cb739021cd7de"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ee744fca8d1ea822480a2a4e7c5f2e1950745477143668f0b523769426060f29"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a38b9f526d0d6cbdaa37808c400e3d9f9473ac4ff64d33d9163fd05d243dbd9b"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60e0e86e870350e03b3e25f9b1dd2c6cc72d2b5f24e070249418320a6f9097b7"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f53f55a8852f0e49b0fc76f2412045d6ad9d5772251dea8f55ea45021616e7d5"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c493365d3fad241d52f096e4995475a60a80f4eba4d3ff89b713bc65c2ca9615"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:300eb606e6b94a7a26f11c8cc8ee59e295c6649bd927f91e1dbd37a4c89430b6"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a665f6f1a87614d1c3039baf44109094926dedf785e346d8b0a728e9cabd27a"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:927d784648211447201d4c6f1babddb7971abad922b32257ab74de2f2750fad0"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c200b30dd573afa83847bed7e3041aa36a8145221bf0cfdfaa62d974d720805c"}, + {file = 
"rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:08166467258fd0240a1256fce272f689f2360227ee41c72aeea103e9e4f63d2b"}, + {file = "rpds_py-0.8.10-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:996cc95830de9bc22b183661d95559ec6b3cd900ad7bc9154c4cbf5be0c9b734"}, + {file = "rpds_py-0.8.10.tar.gz", hash = "sha256:13e643ce8ad502a0263397362fb887594b49cf84bf518d6038c16f235f2bcea4"}, +] scipy = [ {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, @@ -1556,6 +3098,10 @@ seisbench = [ {file = "seisbench-0.4.1-py3-none-any.whl", hash = "sha256:8500cffd4ec59c8dd14a7d22cab22a1f38f76dc1e05926c1c0df0df92abda7de"}, {file = "seisbench-0.4.1.tar.gz", hash = "sha256:57dab768c8cfc145e858189cdea1476e512a30dd8cb310c1e1f71b99d838e725"}, ] +send2trash = [ + {file = "Send2Trash-1.8.2-py3-none-any.whl", hash = "sha256:a384719d99c07ce1eefd6905d2decb6f8b7ed054025bb0e618919f945de4f679"}, + {file = "Send2Trash-1.8.2.tar.gz", hash = "sha256:c132d59fa44b9ca2b1699af5c86f57ce9f4c5eb56629d5d55fbb7a35f84e2312"}, +] sentry-sdk = [ {file = "sentry-sdk-1.26.0.tar.gz", hash = "sha256:760e4fb6d01c994110507133e08ecd4bdf4d75ee4be77f296a3579796cf73134"}, {file = "sentry_sdk-1.26.0-py2.py3-none-any.whl", hash = "sha256:0c9f858337ec3781cf4851972ef42bba8c9828aea116b0dbed8f38c5f9a1896c"}, @@ -1646,6 +3192,14 @@ smmap = [ {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, ] +sniffio = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] +soupsieve = [ + {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, + {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, +] sqlalchemy = [ {file = "SQLAlchemy-2.0.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04383f1e3452f6739084184e427e9d5cb4e68ddc765d52157bf5ef30d5eca14f"}, {file = "SQLAlchemy-2.0.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:724355973297bbe547f3eb98b46ade65a67a3d5a6303f17ab59a2dc6fb938943"}, @@ -1689,10 +3243,22 @@ sqlalchemy = [ {file = "SQLAlchemy-2.0.17-py3-none-any.whl", hash = "sha256:cc9c2630c423ac4973492821b2969f5fe99d9736f3025da670095668fbfcd4d5"}, {file = "SQLAlchemy-2.0.17.tar.gz", hash = "sha256:e186e9e95fb5d993b075c33fe4f38a22105f7ce11cecb5c17b5618181e356702"}, ] +stack-data = [ + {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"}, + {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"}, +] sympy = [ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, ] +terminado = [ + {file = "terminado-0.17.1-py3-none-any.whl", hash = 
"sha256:8650d44334eba354dd591129ca3124a6ba42c3d5b70df5051b6921d506fdaeae"}, + {file = "terminado-0.17.1.tar.gz", hash = "sha256:6ccbbcd3a4f8a25a5ec04991f39a0b8db52dfcd487ea0e578d977e6752380333"}, +] +tinycss2 = [ + {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, + {file = "tinycss2-1.2.1.tar.gz", hash = "sha256:8cff3a8f066c2ec677c06dbc7b45619804a6938478d9d73c284b29d14ecb0627"}, +] tomli = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, @@ -1723,10 +3289,27 @@ torchmetrics = [ {file = "torchmetrics-0.11.4-py3-none-any.whl", hash = "sha256:45f892f3534e91f3ad9e2488d1b05a93b7cb76b7d037969435a41a1f24750d9a"}, {file = "torchmetrics-0.11.4.tar.gz", hash = "sha256:1fe45a14b44dd65d90199017dd5a4b5a128d56a8a311da7916c402c18c671494"}, ] +tornado = [ + {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c367ab6c0393d71171123ca5515c61ff62fe09024fa6bf299cd1339dc9456829"}, + {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b46a6ab20f5c7c1cb949c72c1994a4585d2eaa0be4853f50a03b5031e964fc7c"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2de14066c4a38b4ecbbcd55c5cc4b5340eb04f1c5e81da7451ef555859c833f"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05615096845cf50a895026f749195bf0b10b8909f9be672f50b0fe69cba368e4"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b17b1cf5f8354efa3d37c6e28fdfd9c1c1e5122f2cb56dac121ac61baa47cbe"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:29e71c847a35f6e10ca3b5c2990a52ce38b233019d8e858b755ea6ce4dcdd19d"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:834ae7540ad3a83199a8da8f9f2d383e3c3d5130a328889e4cc991acc81e87a0"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6a0848f1aea0d196a7c4f6772197cbe2abc4266f836b0aac76947872cd29b411"}, + {file = "tornado-6.3.2-cp38-abi3-win32.whl", hash = "sha256:7efcbcc30b7c654eb6a8c9c9da787a851c18f8ccd4a5a3a95b05c7accfa068d2"}, + {file = "tornado-6.3.2-cp38-abi3-win_amd64.whl", hash = "sha256:0c325e66c8123c606eea33084976c832aa4e766b7dff8aedd7587ea44a604cdf"}, + {file = "tornado-6.3.2.tar.gz", hash = "sha256:4b927c4f19b71e627b13f3db2324e4ae660527143f9e1f2e2fb404f3a187e2ba"}, +] tqdm = [ {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, ] +traitlets = [ + {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, + {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"}, +] typing-extensions = [ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, @@ -1735,6 +3318,10 @@ tzdata = [ {file = 
"tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, ] +uri-template = [ + {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, + {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, +] urllib3 = [ {file = "urllib3-2.0.3-py3-none-any.whl", hash = "sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1"}, {file = "urllib3-2.0.3.tar.gz", hash = "sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825"}, @@ -1743,3 +3330,19 @@ wandb = [ {file = "wandb-0.15.4-py3-none-any.whl", hash = "sha256:9018565177e1be14d7d0dd470c583206031c6027c32a98c57fa3bb83955143d7"}, {file = "wandb-0.15.4.tar.gz", hash = "sha256:472daaaa1a4e29a46407a85fd77aadb724c91d87dfe2c37cd82ef77be2257011"}, ] +wcwidth = [ + {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, + {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, +] +webcolors = [ + {file = "webcolors-1.13-py3-none-any.whl", hash = "sha256:29bc7e8752c0a1bd4a1f03c14d6e6a72e93d82193738fa860cbff59d0fcc11bf"}, + {file = "webcolors-1.13.tar.gz", hash = "sha256:c225b674c83fa923be93d235330ce0300373d02885cef23238813b0d5668304a"}, +] +webencodings = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] +websocket-client = [ + {file = "websocket-client-1.6.1.tar.gz", hash = "sha256:c951af98631d24f8df89ab1019fc365f2227c0892f12fd150e935607c79dd0dd"}, + {file = "websocket_client-1.6.1-py3-none-any.whl", hash = "sha256:f1f9f2ad5291f0225a49efad77abf9e700b6fef553900623060dad6e26503b9d"}, +] diff --git a/pyproject.toml b/pyproject.toml index 1916bb1..300b7b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,8 @@ pandas = "^2.0.3" obspy = "^1.4.0" wandb = "^0.15.4" torchmetrics = "^0.11.4" +ipykernel = "^6.24.0" +jupyterlab = "^4.0.2" [tool.poetry.dev-dependencies] diff --git a/scripts/augmentations.py b/scripts/augmentations.py new file mode 100644 index 0000000..afbda1f --- /dev/null +++ b/scripts/augmentations.py @@ -0,0 +1,133 @@ +""" +This file contains augmentations required for the models that are too specific to be merged into SeisBench. +""" + +import numpy as np +import copy + + +class DuplicateEvent: + """ + Adds a rescaled version of the event to the empty part of the trace after the event. + Event position and empty space are determined from a detection. + Detections can be generated for example with :py:class:`~seisbench.generate.labeling.DetectionLabeller`. + + This implementation is modelled after the `implementation for EQTransformer `_. + + .. warning:: + This augmentation does **not** modify the metadata, as representing multiple picks of + the same type is currently not supported. Workflows should therefore always first generate + labels from metadata and then pass the labels in the key `label_keys`. These keys are automatically + adjusted by addition of the labels. + + .. 
warning:: + This implementation currently has strict shape requirements: + + - (1, samples) for detection + - (channels, samples) for data + - (labels, samples) for labels + + :param inv_scale: The scale factor is defined by as 1/u, where u is uniform. + `inv_scale` defines the minimum and maximum values for u. + Defaults to (1, 10), e.g., scaling by factor 1 to 1/10. + :param detection_key: Key to read detection from. + If key is a tuple, detection will be read from the first key and written to the second one. + :param key: The keys for reading from and writing to the state dict. + If key is a single string, the corresponding entry in state dict is modified. + Otherwise, a 2-tuple is expected, with the first string indicating the key + to read from and the second one the key to write to. + :param label_keys: Keys for the label columns. + Labels of the original and duplicate events will be added and capped at 1. + Note that this will lead to invalid noise traces. + Value can either be a single key specification or a list of key specifications. + Each key specification is either a string, for identical input and output keys, + or as a tuple of two strings, input and output keys. + Defaults to None. + """ + + def __init__( + self, inv_scale=(1, 10), detection_key="detections", key="X", label_keys=None + ): + if isinstance(detection_key, str): + self.detection_key = (detection_key, detection_key) + else: + self.detection_key = detection_key + + if isinstance(key, str): + self.key = (key, key) + else: + self.key = key + + # Single key + if not isinstance(label_keys, list): + if label_keys is None: + label_keys = [] + else: + label_keys = [label_keys] + + # Resolve identical input and output keys + self.label_keys = [] + for key in label_keys: + if isinstance(key, tuple): + self.label_keys.append(key) + else: + self.label_keys.append((key, key)) + + self.inv_scale = inv_scale + + def __call__(self, state_dict): + x, metadata = state_dict[self.key[0]] + detection, _ = state_dict[self.detection_key[0]] + detection_mask = detection[0] > 0.5 + + if detection.shape[-1] != x.shape[-1]: + raise ValueError("Number of samples in trace and detection disagree.") + + if self.key[0] != self.key[1]: + # Ensure metadata is not modified inplace unless input and output key are anyhow identical + metadata = copy.deepcopy(metadata) + + if detection_mask.any(): + n_samples = x.shape[-1] + event_samples = np.arange(n_samples)[detection_mask] + event_start, event_end = np.min(event_samples), np.max(event_samples) + 1 + + if event_end + 20 < n_samples: + second_start = np.random.randint(event_end + 20, n_samples) + scale = 1 / np.random.uniform(*self.inv_scale) + + if self.key[0] != self.key[1]: + # Avoid inplace modification if input and output keys differ + x = x.copy() + + space = min(event_end - event_start, n_samples - second_start) + x[:, second_start : second_start + space] += ( + x[:, event_start : event_start + space] * scale + ) + + shift = second_start - event_start + + for label_key in self.label_keys + [self.detection_key]: + y, metadata = state_dict[label_key[0]] + if y.shape[-1] != n_samples: + raise ValueError( + f"Number of samples disagree between trace and label key '{label_key[0]}'." 
+                        )
+
+                    if label_key[0] != label_key[1]:
+                        metadata = copy.deepcopy(metadata)
+                        y = y.copy()
+
+                    y[:, shift:] += y[:, :-shift]
+                    y = np.minimum(y, 1)
+                    state_dict[label_key[1]] = (y, metadata)
+        else:
+            # Copy entries
+            for label_key in self.label_keys + [self.detection_key]:
+                y, metadata = state_dict[label_key[0]]
+                if label_key[0] != label_key[1]:
+                    metadata = copy.deepcopy(metadata)
+                    y = y.copy()
+                state_dict[label_key[1]] = (y, metadata)
+
+        state_dict[self.key[1]] = (x, metadata)
diff --git a/scripts/collect_results.py b/scripts/collect_results.py
new file mode 100644
index 0000000..04eb00c
--- /dev/null
+++ b/scripts/collect_results.py
@@ -0,0 +1,335 @@
+"""
+This script collects results in a folder, calculates performance metrics and writes them to csv.
+"""
+
+import argparse
+from pathlib import Path
+import logging
+import pandas as pd
+import numpy as np
+from sklearn.metrics import (
+    precision_recall_curve,
+    precision_recall_fscore_support,
+    roc_auc_score,
+    matthews_corrcoef,
+)
+from tqdm import tqdm
+
+
+def traverse_path(path, output, cross=False, resampled=False, baer=False):
+    """
+    Traverses the given path and extracts results for each experiment and version
+
+    :param path: Root path
+    :param output: Path to write results csv to
+    :param cross: If true, expects cross-domain results.
+    :return: None
+    """
+    path = Path(path)
+
+    results = []
+
+    exp_dirs = [x for x in path.iterdir() if x.is_dir()]
+    for exp_dir in tqdm(exp_dirs):
+        itr = exp_dir.iterdir()
+        if baer:
+            itr = [exp_dir]  # Missing version directory in the structure
+        for version_dir in itr:
+            if not version_dir.is_dir():
+                continue
+
+            results.append(
+                process_version(
+                    version_dir, cross=cross, resampled=resampled, baer=baer
+                )
+            )
+
+    results = pd.DataFrame(results)
+    if cross:
+        sort_keys = ["data", "model", "target", "lr", "version"]
+    else:
+        sort_keys = ["data", "model", "lr", "version"]
+    results.sort_values(sort_keys, inplace=True)
+    results.to_csv(output, index=False)
+
+
+def process_version(version_dir: Path, cross: bool, resampled: bool, baer: bool):
+    """
+    Extracts statistics for the given version of the given experiment.
+
+    :param version_dir: Path to the specific version
+    :param cross: If true, expects cross-domain results.
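+    :param resampled: If true, expects cross-domain cross-sampling rate results.
+    :param baer: If true, expects results from the Baer-Kradolfer picker.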
+ :return: Results dictionary + """ + stats = parse_exp_name(version_dir, cross=cross, resampled=resampled, baer=baer) + + stats.update(eval_task1(version_dir)) + stats.update(eval_task23(version_dir)) + + return stats + + +def parse_exp_name(version_dir, cross, resampled, baer): + if baer: + exp_name = version_dir.name + version = "0" + else: + exp_name = version_dir.parent.name + version = version_dir.name.split("_")[-1] + + parts = exp_name.split("_") + target = None + sampling_rate = None + if cross or baer: + if len(parts) == 4: + data, model, lr, target = parts + else: + data, model, target = parts + lr = "0.001" + elif resampled: + if len(parts) == 5: + data, model, lr, target, sampling_rate = parts + else: + data, model, target, sampling_rate = parts + lr = "0.001" + else: + if len(parts) == 3: + data, model, lr = parts + else: + data, model, *_ = parts + lr = "0.001" + + # lr = float(lr) + + stats = { + "experiment": exp_name, + "data": data, + "model": model, + "lr": None, + "version": version, + } + + if cross or baer: + stats["target"] = target + if resampled: + stats["target"] = target + stats["sampling_rate"] = sampling_rate + + return stats + + +def eval_task1(version_dir: Path): + if not ( + (version_dir / "dev_task1.csv").is_file() + and (version_dir / "test_task1.csv").is_file() + ): + logging.warning(f"Directory {version_dir} does not contain task 1") + return {} + + stats = {} + + dev_pred = pd.read_csv(version_dir / "dev_task1.csv") + dev_pred["trace_type_bin"] = dev_pred["trace_type"] == "earthquake" + test_pred = pd.read_csv(version_dir / "test_task1.csv") + test_pred["trace_type_bin"] = test_pred["trace_type"] == "earthquake" + + prec, recall, thr = precision_recall_curve( + dev_pred["trace_type_bin"], dev_pred["score_detection"] + ) + + f1 = 2 * prec * recall / (prec + recall) + auc = roc_auc_score(dev_pred["trace_type_bin"], dev_pred["score_detection"]) + + opt_index = np.nanargmax(f1) # F1 optimal threshold index + opt_thr = thr[opt_index] # F1 optimal threshold value + + dev_stats = { + "dev_det_precision": prec[opt_index], + "dev_det_recall": recall[opt_index], + "dev_det_f1": f1[opt_index], + "dev_det_auc": auc, + "det_threshold": opt_thr, + } + stats.update(dev_stats) + + prec, recall, f1, _ = precision_recall_fscore_support( + test_pred["trace_type_bin"], + test_pred["score_detection"] > opt_thr, + average="binary", + ) + auc = roc_auc_score(test_pred["trace_type_bin"], test_pred["score_detection"]) + test_stats = { + "test_det_precision": prec, + "test_det_recall": recall, + "test_det_f1": f1, + "test_det_auc": auc, + } + stats.update(test_stats) + + return stats + + +def eval_task23(version_dir: Path): + print(version_dir / "dev_task23.csv") + if not ( + (version_dir / "dev_task23.csv").is_file() + and (version_dir / "test_task23.csv").is_file() + ): + logging.warning(f"Directory {version_dir} does not contain tasks 2 and 3") + return {} + + stats = {} + + dev_pred = pd.read_csv(version_dir / "dev_task23.csv") + dev_pred["phase_label_bin"] = dev_pred["phase_label"] == "P" + test_pred = pd.read_csv(version_dir / "test_task23.csv") + test_pred["phase_label_bin"] = test_pred["phase_label"] == "P" + + def add_aux_columns(pred): + for col in ["s_sample_pred", "score_p_or_s"]: + if col not in pred.columns: + pred[col] = np.nan + + add_aux_columns(dev_pred) + add_aux_columns(test_pred) + + def nanmask(pred): + """ + Returns all entries that are nan in score_p_or_s, p_sample_pred and s_sample_pred + """ + mask = np.logical_and( + np.isnan(pred["p_sample_pred"]), 
np.isnan(pred["s_sample_pred"]) + ) + mask = np.logical_and(mask, np.isnan(pred["score_p_or_s"])) + return mask + + if nanmask(dev_pred).all(): + logging.warning(f"{version_dir} contains NaN predictions for tasks 2 and 3") + return {} + + dev_pred = dev_pred[~nanmask(dev_pred)] + test_pred = test_pred[~nanmask(test_pred)] + + skip_task2 = False + if ( + np.logical_or( + np.isnan(dev_pred["score_p_or_s"]), np.isinf(dev_pred["score_p_or_s"]) + ).all() + or np.logical_or( + np.isnan(test_pred["score_p_or_s"]), np.isinf(test_pred["score_p_or_s"]) + ).all() + ): + # For unfortunate combinations of nans and infs, otherwise weird scores can occur + skip_task2 = True + + # Clipping removes infinitely likely P waves, usually resulting from models trained without S arrivals + dev_pred["score_p_or_s"] = np.clip(dev_pred["score_p_or_s"].values, -1e100, 1e100) + test_pred["score_p_or_s"] = np.clip(test_pred["score_p_or_s"].values, -1e100, 1e100) + + dev_pred_restricted = dev_pred[~np.isnan(dev_pred["score_p_or_s"])] + test_pred_restricted = test_pred[~np.isnan(test_pred["score_p_or_s"])] + if len(dev_pred_restricted) > 0 and not skip_task2: + prec, recall, thr = precision_recall_curve( + dev_pred_restricted["phase_label_bin"], dev_pred_restricted["score_p_or_s"] + ) + + f1 = 2 * prec * recall / (prec + recall) + + opt_index = np.nanargmax(f1) # F1 optimal threshold index + opt_thr = thr[opt_index] # F1 optimal threshold value + + # Determine (approximately) optimal MCC threshold using 50 candidates + mcc_thrs = np.sort(dev_pred["score_p_or_s"].values) + mcc_thrs = mcc_thrs[np.linspace(0, len(mcc_thrs) - 1, 50, dtype=int)] + mccs = [] + for thr in mcc_thrs: + mccs.append( + matthews_corrcoef( + dev_pred["phase_label_bin"], dev_pred["score_p_or_s"] > thr + ) + ) + mcc = np.max(mccs) + mcc_thr = mcc_thrs[np.argmax(mccs)] + + dev_stats = { + "dev_phase_precision": prec[opt_index], + "dev_phase_recall": recall[opt_index], + "dev_phase_f1": f1[opt_index], + "phase_threshold": opt_thr, + "dev_phase_mcc": mcc, + "phase_threshold_mcc": mcc_thr, + } + stats.update(dev_stats) + + prec, recall, f1, _ = precision_recall_fscore_support( + test_pred_restricted["phase_label_bin"], + test_pred_restricted["score_p_or_s"] > opt_thr, + average="binary", + ) + mcc = matthews_corrcoef( + test_pred["phase_label_bin"], test_pred["score_p_or_s"] > mcc_thr + ) + test_stats = { + "test_phase_precision": prec, + "test_phase_recall": recall, + "test_phase_f1": f1, + "test_phase_mcc": mcc, + } + stats.update(test_stats) + + for pred, set_str in [(dev_pred, "dev"), (test_pred, "test")]: + for i, phase in enumerate(["P", "S"]): + pred_phase = pred[pred["phase_label"] == phase] + pred_col = f"{phase.lower()}_sample_pred" + + if len(pred_phase) == 0: + continue + + diff = (pred_phase[pred_col] - pred_phase["phase_onset"]) / pred_phase[ + "sampling_rate" + ] + + stats[f"{set_str}_{phase}_mean_s"] = np.mean(diff) + stats[f"{set_str}_{phase}_std_s"] = np.sqrt(np.mean(diff**2)) + stats[f"{set_str}_{phase}_mae_s"] = np.mean(np.abs(diff)) + + return stats + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Collects results from all experiments in a folder and outputs them in condensed csv format." + ) + parser.add_argument( + "path", + type=str, + help="Root path of predictions", + ) + parser.add_argument( + "output", + type=str, + help="Path for the output csv", + ) + parser.add_argument( + "--cross", action="store_true", help="If true, expects cross-domain results." 
+ ) + parser.add_argument( + "--resampled", + action="store_true", + help="If true, expects cross-domain cross-sampling rate results.", + ) + parser.add_argument( + "--baer", + action="store_true", + help="If true, expects results from Baer-Kradolfer picker.", + ) + + args = parser.parse_args() + + traverse_path( + args.path, + args.output, + cross=args.cross, + resampled=args.resampled, + baer=args.baer, + ) diff --git a/scripts/config_loader.py b/scripts/config_loader.py new file mode 100644 index 0000000..478764b --- /dev/null +++ b/scripts/config_loader.py @@ -0,0 +1,26 @@ +import json +from pathlib import Path + + +def load_config(file_path): + with open(file_path, 'r') as config_file: + config = json.load(config_file) + return config + + +project_path = str(Path.cwd().parent) +config_path = project_path + "/config.json" + +config = load_config(config_path) + +data_path = f"{project_path}/{config['data_path']}" +models_path = f"{project_path}/{config['models_path']}" +targets_path = f"{project_path}/{config['targets_path']}" +dataset_name = config['dataset_name'] +configs_path = f"{project_path}/{config['configs_path']}" + +sweep_files = config['sweep_files'] +sampling_rate = config['sampling_rate'] +num_workers = config['num_workers'] +seed = config['seed'] +experiment_count = config['experiment_count'] diff --git a/scripts/data.py b/scripts/data.py new file mode 100644 index 0000000..1fe54dc --- /dev/null +++ b/scripts/data.py @@ -0,0 +1,32 @@ +""" +This file contains functionality related to data. +""" + +import seisbench.data as sbd + + +def get_dataset_by_name(name): + """ + Resolve dataset name to class from seisbench.data. + + :param name: Name of dataset as defined in seisbench.data. + :return: Dataset class from seisbench.data + """ + try: + return sbd.__getattribute__(name) + except AttributeError: + raise ValueError(f"Unknown dataset '{name}'.") + + +def get_custom_dataset(path): + """ + Return custom dataset in seisbench format + :param path: + :return: Dataset class + """ + + try: + return sbd.WaveformDataset(path) + except AttributeError: + raise ValueError(f"Unknown dataset '{path}'.") + diff --git a/scripts/eval.py b/scripts/eval.py new file mode 100644 index 0000000..91b17f4 --- /dev/null +++ b/scripts/eval.py @@ -0,0 +1,248 @@ +""" +This script implements functionality for evaluating models. +Given a model and a set of targets, it calculates and outputs predictions. 
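+
+Example call (sketch; the weights folder name and the targets directory are
+illustrative and depend on the local setup, see the argument help below):
+
+    python eval.py igf_PhaseNet targets/igf --sets dev,test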
+""" + +import seisbench.generate as sbg + +import argparse +import pandas as pd +import yaml +from pathlib import Path +import pytorch_lightning as pl +from torch.utils.data import DataLoader +import torch + +import models, data +import logging +from util import default_workers, load_best_model_data +import time +import datetime +from config_loader import models_path, project_path +import os + +data_aliases = { + "ethz": "ETHZ", + "geofon": "GEOFON", + "stead": "STEAD", + "neic": "NEIC", + "instance": "InstanceCountsCombined", + "iquique": "Iquique", + "lendb": "LenDB", + "scedc": "SCEDC" +} + + +def main(weights, targets, sets, batchsize, num_workers, sampling_rate=None, sweep_id=None, test_run=False): + weights = Path(weights) + targets = Path(os.path.abspath(targets)) + print(targets) + # print() + sets = sets.split(",") + + checkpoint_path, version = load_best_model_data(sweep_id, weights) + logging.warning("Starting evaluation of model: \n" + checkpoint_path) + + config_path = f"{models_path}/{weights}/{version}/hparams.yaml" + with open(config_path, "r") as f: + # config = yaml.safe_load(f) + config = yaml.full_load(f) + + model_name = config["model_name"][0] if type(config["model_name"]) == list else config["model_name"] + + model_cls = models.__getattribute__(model_name + "Lit") + model = model_cls.load_from_checkpoint(checkpoint_path) + + data_name = data_aliases[targets.name] if targets.name in data_aliases else None + + if data_name != config["dataset_name"] and targets.name in data_aliases: + logging.warning("Detected cross-domain evaluation") + pred_root = "pred_cross" + parts = weights.name.split() + weight_path_name = "_".join(parts[:2] + [targets.name] + parts[2:]) + + else: + pred_root = "pred" + weight_path_name = weights.name + + if data_name is not None: + dataset = data.get_dataset_by_name(data_name)( + sampling_rate=100, component_order="ZNE", dimension_order="NCW", cache="full" + ) + else: + data_path = project_path + '/' + config['data_path'] + print("Loading dataset: ", data_path) + dataset = data.get_custom_dataset(data_path) + + if sampling_rate is not None: + dataset.sampling_rate = sampling_rate + pred_root = pred_root + "_resampled" + weight_path_name = weight_path_name + f"_{sampling_rate}" + + for eval_set in sets: + split = dataset.get_split(eval_set) + if targets.name == "instance": + logging.warning( + "Overwriting noise trace_names to allow correct identification" + ) + # Replace trace names for noise entries + split._metadata["trace_name"].values[ + -len(split.datasets[-1]) : + ] = split._metadata["trace_name"][-len(split.datasets[-1]) :].apply( + lambda x: "noise_" + x + ) + split._build_trace_name_to_idx_dict() + + logging.warning(f"Starting set {eval_set}") + split.preload_waveforms(pbar=True) + print("eval set shape", split.metadata.shape) + + for task in ["1", "23"]: + task_csv = targets / f"task{task}.csv" + + print(task_csv) + + if not task_csv.is_file(): + continue + + logging.warning(f"Starting task {task}") + + task_targets = pd.read_csv(task_csv) + task_targets = task_targets[task_targets["trace_split"] == eval_set] + if task == "1" and targets.name == "instance": + border = _identify_instance_dataset_border(task_targets) + task_targets["trace_name"].values[border:] = task_targets["trace_name"][ + border: + ].apply(lambda x: "noise_" + x) + + if sampling_rate is not None: + for key in ["start_sample", "end_sample", "phase_onset"]: + if key not in task_targets.columns: + continue + task_targets[key] = ( + task_targets[key] + * sampling_rate + 
+                        / task_targets["sampling_rate"]
+                    )
+                task_targets["sampling_rate"] = sampling_rate
+
+            restrict_to_phase = config.get("restrict_to_phase", None)
+            if restrict_to_phase is not None and "phase_label" in task_targets.columns:
+                mask = task_targets["phase_label"].isin(list(restrict_to_phase))
+                task_targets = task_targets[mask]
+
+            if restrict_to_phase is not None and task == "1":
+                logging.warning("Skipping task 1 as restrict_to_phase is set.")
+                continue
+
+            generator = sbg.SteeredGenerator(split, task_targets)
+            generator.add_augmentations(model.get_eval_augmentations())
+
+            loader = DataLoader(
+                generator, batch_size=batchsize, shuffle=False, num_workers=num_workers
+            )
+            # trainer = pl.Trainer(accelerator="gpu", devices=1)
+            trainer = pl.Trainer()
+
+            predictions = trainer.predict(model, loader)
+
+            # Merge batches
+            merged_predictions = []
+            for i, _ in enumerate(predictions[0]):
+                merged_predictions.append(torch.cat([x[i] for x in predictions]))
+
+            merged_predictions = [x.cpu().numpy() for x in merged_predictions]
+            task_targets["score_detection"] = merged_predictions[0]
+            task_targets["score_p_or_s"] = merged_predictions[1]
+            task_targets["p_sample_pred"] = (
+                merged_predictions[2] + task_targets["start_sample"]
+            )
+            task_targets["s_sample_pred"] = (
+                merged_predictions[3] + task_targets["start_sample"]
+            )
+
+            pred_path = (
+                weights.parent.parent
+                / pred_root
+                / weight_path_name
+                / version
+                / f"{eval_set}_task{task}.csv"
+            )
+            pred_path.parent.mkdir(exist_ok=True, parents=True)
+            task_targets.to_csv(pred_path, index=False)
+
+
+def _identify_instance_dataset_border(task_targets):
+    """
+    Calculates the dataset border between Signal and Noise for instance,
+    assuming it is the only place where the bucket number does not increase
+    """
+    buckets = task_targets["trace_name"].apply(lambda x: int(x.split("$")[0][6:]))
+
+    last_bucket = 0
+    for i, bucket in enumerate(buckets):
+        if bucket < last_bucket:
+            return i
+        last_bucket = bucket
+
+
+if __name__ == "__main__":
+    code_start_time = time.perf_counter()
+    parser = argparse.ArgumentParser(
+        description="Evaluate a trained model using a set of targets."
+    )
+    parser.add_argument(
+        "weights",
+        type=str,
+        help="Path to weights. Expected to be in models_path directory. "
+        "The script will automatically load the configuration and the model. "
+        "The script always uses the checkpoint with lowest validation loss. "
+        "If sweep_id is provided, the script considers only the checkpoints generated by that sweep. "
+        "Predictions will be written into the weights path as csv. "
+        "Note: Due to pytorch lightning internals, there exist two weights folders, "
+        "{weights} and {weight}_{weights}. Please use the former as parameter",
+    )
+    parser.add_argument(
+        "targets",
+        type=str,
+        help="Path to evaluation targets folder. "
+        "The script will detect which tasks are present based on file names.",
+    )
+    parser.add_argument(
+        "--sets",
+        type=str,
+        default="dev,test",
+        help="Sets on which to evaluate, separated by commas. 
Defaults to dev and test.", + ) + parser.add_argument("--batchsize", type=int, default=1024, help="Batch size") + parser.add_argument( + "--num_workers", + default=default_workers, + type=int, + help="Number of workers for data loader", + ) + parser.add_argument( + "--sampling_rate", type=float, help="Overwrites the sampling rate in the data" + ) + parser.add_argument( + "--sweep_id", type=str, help="wandb sweep_id", required=False, default=None + ) + parser.add_argument( + "--test_run", action="store_true", required=False, default=False + ) + args = parser.parse_args() + + main( + args.weights, + args.targets, + args.sets, + batchsize=args.batchsize, + num_workers=args.num_workers, + sampling_rate=args.sampling_rate, + sweep_id=args.sweep_id, + test_run=args.test_run + ) + running_time = str( + datetime.timedelta(seconds=time.perf_counter() - code_start_time) + ) + print(f"Running time: {running_time}") diff --git a/scripts/generate_eval_targets.py b/scripts/generate_eval_targets.py new file mode 100644 index 0000000..9cc7c11 --- /dev/null +++ b/scripts/generate_eval_targets.py @@ -0,0 +1,321 @@ +""" +This script generates evaluation targets for the following three tasks: + +- Earthquake detection (Task 1): Given a 30~s window, does the window contain an earthquake signal? +- Phase identification (Task 2): Given a 10~s window containing exactly one phase onset, identify which phase type. +- Onset time determination (Task 3): Given a 10~s window containing exactly one phase onset, identify the onset time. + +Each target for evaluation will consist of the following information: + +- trace name (as in dataset) +- trace index (in dataset) +- split (as in dataset) +- sampling rate (at which all information is presented) +- start_sample +- end_sample +- trace_type (only task 1: earthquake/noise) +- phase_label (only task 2/3: P/S) +- full_phase_label (only task 2/3: phase label as in the dataset, might be Pn, Pg, etc.) +- phase_onset_sample (only task 2/3: onset sample of the phase relative to full trace) + +It needs to be provided with a dataset and writes a folder with two CSV files, one for task 1 and one for tasks 2 and 3. +Each file will describe targets for train, dev and test, derived from the respective splits. + +When using these tasks for evaluation, the models can make use of waveforms from the context, i.e., +before/after the start and end samples. However, make sure this does not add further bias in the evaluation, +for example by always centring the windows on the picks using context. + +.. warning:: + For comparability, it is strongly advised to use published evaluation targets, instead of generating new ones. + +.. warning:: + This implementation is not optimized and loads the full waveform data for its computations. + This will lead to very high memory usage, as the full dataset will be stored in memory. +""" +import seisbench.data as sbd + +import argparse +from pathlib import Path +import pandas as pd +import numpy as np +from tqdm import tqdm + +from models import phase_dict + + +def main(dataset_name, output, tasks, sampling_rate, noise_before_events): + np.random.seed(42) + tasks = [str(i) in tasks.split(",") for i in range(1, 4)] + + if not any(tasks): + raise ValueError(f"No task defined. 
Got tasks='{tasks}'.") + + dataset_args = { + "sampling_rate": sampling_rate, + "dimension_order": "NCW", + "cache": "full", + } + + try: + # Check if dataset is available in SeisBench + dataset = sbd.__getattribute__(dataset_name)(**dataset_args) + except AttributeError: + # Otherwise interpret data_in as path + dataset = sbd.WaveformDataset(dataset_name, **dataset_args) + + output = Path(output) + output.mkdir(parents=True, exist_ok=False) + + if "split" in dataset.metadata.columns: + dataset.filter(dataset["split"].isin(["dev", "test"]), inplace=True) + + dataset.preload_waveforms(pbar=True) + + if tasks[0]: + generate_task1(dataset, output, sampling_rate, noise_before_events) + if tasks[1] or tasks[2]: + generate_task23(dataset, output, sampling_rate) + + +def generate_task1(dataset, output, sampling_rate, noise_before_events): + np.random.seed(42) + windowlen = 30 * sampling_rate # 30 s windows + labels = [] + + for i in tqdm(range(len(dataset)), total=len(dataset)): + waveforms, metadata = dataset.get_sample(i) + + if "split" in metadata: + trace_split = metadata["split"] + else: + trace_split = "" + + def checkphase(metadata, phase, phase_label, target_phase, npts): + return ( + phase in metadata + and phase_label == target_phase + and not np.isnan(metadata[phase]) + and 0 <= metadata[phase] < npts + ) + + p_arrivals = [ + metadata[phase] + for phase, phase_label in phase_dict.items() + if checkphase(metadata, phase, phase_label, "P", waveforms.shape[-1]) + ] + s_arrivals = [ + metadata[phase] + for phase, phase_label in phase_dict.items() + if checkphase(metadata, phase, phase_label, "S", waveforms.shape[-1]) + ] + + if len(p_arrivals) == 0 and len(s_arrivals) == 0: + start_sample, end_sample = select_window_containing( + waveforms.shape[-1], windowlen + ) + sample = { + "trace_name": metadata["trace_name"], + "trace_idx": i, + "trace_split": trace_split, + "sampling_rate": sampling_rate, + "start_sample": start_sample, + "end_sample": end_sample, + "trace_type": "noise", + } + labels += [sample] + + else: + first_arrival = min(p_arrivals + s_arrivals) + + start_sample, end_sample = select_window_containing( + waveforms.shape[-1], windowlen, containing=first_arrival + ) + if end_sample - start_sample <= windowlen: + sample = { + "trace_name": metadata["trace_name"], + "trace_idx": i, + "trace_split": trace_split, + "sampling_rate": sampling_rate, + "start_sample": start_sample, + "end_sample": end_sample, + "trace_type": "earthquake", + } + labels += [sample] + + if noise_before_events and first_arrival > windowlen: + start_sample, end_sample = select_window_containing( + min(waveforms.shape[-1], first_arrival), windowlen + ) + if end_sample - start_sample <= windowlen: + sample = { + "trace_name": metadata["trace_name"], + "trace_idx": i, + "trace_split": trace_split, + "sampling_rate": sampling_rate, + "start_sample": start_sample, + "end_sample": end_sample, + "trace_type": "noise", + } + labels += [sample] + + labels = pd.DataFrame(labels) + diff = labels["end_sample"] - labels["start_sample"] + labels = labels[diff > 100] + labels.to_csv(output / "task1.csv", index=False) + + +def generate_task23(dataset, output, sampling_rate): + np.random.seed(42) + windowlen = 10 * sampling_rate # 30 s windows + labels = [] + + for idx in tqdm(range(len(dataset)), total=len(dataset)): + waveforms, metadata = dataset.get_sample(idx) + + if "split" in metadata: + trace_split = metadata["split"] + else: + trace_split = "" + + def checkphase(metadata, phase, npts): + return ( + phase in 
metadata + and not np.isnan(metadata[phase]) + and 0 <= metadata[phase] < npts + ) + + # Example entry: (1031, "P", "Pg") + arrivals = sorted( + [ + (metadata[phase], phase_label, phase.split("_")[1]) + for phase, phase_label in phase_dict.items() + if checkphase(metadata, phase, waveforms.shape[-1]) + ] + ) + + if len(arrivals) == 0: + # Trace has no arrivals + continue + + for i, (onset, phase, full_phase) in enumerate(arrivals): + if i == 0: + onset_before = 0 + else: + onset_before = int(arrivals[i - 1][0]) + int( + 0.5 * sampling_rate + ) # 0.5 s minimum spacing + + if i == len(arrivals) - 1: + onset_after = np.inf + else: + onset_after = int(arrivals[i + 1][0]) - int( + 0.5 * sampling_rate + ) # 0.5 s minimum spacing + + if ( + onset_after - onset_before < windowlen + or onset_before > onset + or onset_after < onset + ): + # Impossible to isolate pick + continue + + else: + onset_after = min(onset_after, waveforms.shape[-1]) + # Shift everything to a "virtual" start at onset_before + start_sample, end_sample = select_window_containing( + onset_after - onset_before, + windowlen=windowlen, + containing=onset - onset_before, + bounds=(50, 50), + ) + start_sample += onset_before + end_sample += onset_before + if end_sample - start_sample <= windowlen: + sample = { + "trace_name": metadata["trace_name"], + "trace_idx": idx, + "trace_split": trace_split, + "sampling_rate": sampling_rate, + "start_sample": start_sample, + "end_sample": end_sample, + "phase_label": phase, + "full_phase_label": full_phase, + "phase_onset": onset, + } + + labels += [sample] + + labels = pd.DataFrame(labels) + diff = labels["end_sample"] - labels["start_sample"] + labels = labels[diff > 100] + labels.to_csv(output / "task23.csv", index=False) + + +def select_window_containing(npts, windowlen, containing=None, bounds=(100, 100)): + """ + Selects a window from a larger trace. + + :param npts: Number of points of the full trace + :param windowlen: Desired windowlen + :param containing: Sample number that should be contained. If None, any window within the trace is valid. + :param bounds: The containing sample may not be in the first/last samples indicated here. + :return: Start sample, end_sample + """ + if npts <= windowlen: + # If npts is smaller than the window length, always return the full window + return 0, npts + + else: + if containing is None: + start_sample = np.random.randint(0, npts - windowlen + 1) + return start_sample, start_sample + windowlen + + else: + earliest_start = max(0, containing - windowlen + bounds[1]) + latest_start = min(npts - windowlen, containing - bounds[0]) + if latest_start <= earliest_start: + # Again, return full window + return 0, npts + + else: + start_sample = np.random.randint(earliest_start, latest_start + 1) + return start_sample, start_sample + windowlen + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Generate evaluation targets. See the docstring for details." + ) + parser.add_argument( + "dataset", type=str, help="Path to input dataset or SeisBench dataset name" + ) + parser.add_argument( + "output", type=str, help="Path to write target files to. Must not exist." + ) + parser.add_argument( + "--tasks", + type=str, + default="1,2,3", + help="Which tasks to generate data for. 
By default generates data for all tasks.", + ) + parser.add_argument( + "--sampling_rate", + type=float, + default=100, + help="Sampling rate in Hz to generate targets for.", + ) + parser.add_argument( + "--no_noise_before_events", + action="store_true", + help="If set, does not extract noise from windows before the first arrival.", + ) + args = parser.parse_args() + + main( + args.dataset, + args.output, + args.tasks, + args.sampling_rate, + not args.no_noise_before_events, + ) diff --git a/scripts/hyperparameter_sweep.py b/scripts/hyperparameter_sweep.py new file mode 100644 index 0000000..e85ec3d --- /dev/null +++ b/scripts/hyperparameter_sweep.py @@ -0,0 +1,178 @@ +# ----------------- +# Copyright © 2023 ACK Cyfronet AGH, Poland. +# This work was partially funded by EPOS Project funded in frame of PL-POIR4.2 +# ----------------- + +import os.path +import argparse +from pytorch_lightning.loggers import WandbLogger, CSVLogger +from pytorch_lightning.callbacks import ModelCheckpoint +from pytorch_lightning.callbacks.early_stopping import EarlyStopping +import pytorch_lightning as pl +import wandb +import torch +import traceback +import logging +from dotenv import load_dotenv +import models +import train +import util +from config_loader import config as common_config +from config_loader import models_path, dataset_name, seed, experiment_count + + +torch.multiprocessing.set_sharing_strategy('file_system') + +load_dotenv() +wandb_api_key = os.environ.get('WANDB_API_KEY') +if wandb_api_key is None: + raise ValueError("WANDB_API_KEY environment variable is not set.") +host = os.environ.get("WANDB_HOST") +if host is None: + raise ValueError("WANDB_HOST environment variable is not set.") + + +wandb.login(key=wandb_api_key, host=host) +# wandb.login(key=wandb_api_key) + +wandb_project_name = os.environ.get("WANDB_PROJECT") +wandb_user_name = os.environ.get("WANDB_USER") + +script_name = os.path.splitext(os.path.basename(__file__))[0] +logger = logging.getLogger(script_name) +logger.setLevel(logging.WARNING) + + +def set_random_seed(seed=3): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def get_trainer_args(config): + trainer_args = {'max_epochs': config.max_epochs[0]} + return trainer_args + + +class HyperparameterSweep: + def __init__(self, project_name, sweep_config): + self.project_name = project_name + self.sweep_config = sweep_config + self.sweep_id = None + + def run_sweep(self): + + # Create the sweep + self.sweep_id = wandb.sweep(self.sweep_config, project=self.project_name) + + logger.info("Created sweep with ID: " + self.sweep_id) + + # Run the sweep + wandb.agent(self.sweep_id, function=self.run_experiment, count=experiment_count) + + def all_runs_finished(self): + + sweep_path = f"{wandb_user_name}/{wandb_project_name}/{self.sweep_id}" + logger.debug(f"Sweep path: {sweep_path}") + sweep_runs = wandb.Api().sweep(sweep_path).runs + all_finished = all(run.state == "finished" for run in sweep_runs) + if all_finished: + logger.info("All runs finished successfully.") + + all_not_running = all(run.state != "running" for run in sweep_runs) + if all_not_running and not all_finished: + logger.warning("Some runs are not finished but failed or crashed.") + + return all_not_running + + def run_experiment(self): + + try: + + logger.debug("Starting a new run...") + run = wandb.init( + project=self.project_name, + config=common_config, + ) + + wandb.run.log_code( + 
".", + include_fn=lambda path: path.endswith(os.path.basename(__file__)) + ) + + model_name = wandb.config.model_name[0] + model_args = models.get_model_specific_args(wandb.config) + logger.debug(f"Initializing {model_name}") + + model = models.__getattribute__(model_name + "Lit")(**model_args) + + train_loader, dev_loader = train.prepare_data(wandb.config, model, test_run=False) + + wandb_logger = WandbLogger(project=self.project_name, log_model="all") + wandb_logger.watch(model) + + # CSV logger - also used for saving configuration as yaml + experiment_name = f"{dataset_name}_{model_name}" + csv_logger = CSVLogger(models_path, experiment_name, version=run.id) + csv_logger.log_hyperparams(wandb.config) + + loggers = [wandb_logger, csv_logger] + + experiment_signature = f"{experiment_name}_sweep={self.sweep_id}-run={run.id}" + + logger.debug("Experiment signature: " + experiment_signature) + + checkpoint_callback = ModelCheckpoint( + save_top_k=1, + filename=experiment_signature + "-{epoch}-{val_loss:.3f}", + monitor="val_loss", + mode="min", + dirpath=f"{models_path}/{experiment_name}/", + ) # save_top_k=1, monitor="val_loss", mode="min": save the best model in terms of validation loss + checkpoint_callback.STARTING_VERSION = 1 + + early_stopping_callback = EarlyStopping( + monitor="val_loss", + patience=3, + verbose=True, + mode="min") + callbacks = [checkpoint_callback, early_stopping_callback] + + trainer = pl.Trainer( + default_root_dir=models_path, + logger=loggers, + callbacks=callbacks, + **get_trainer_args(wandb.config) + ) + + trainer.fit(model, train_loader, dev_loader) + + except Exception as e: + logger.error("caught error: ", str(e)) + traceback_str = traceback.format_exc() + logger.error(traceback_str) + + run.finish() + + +def start_sweep(sweep_config): + + logger.info("Starting sweep with config: " + str(sweep_config)) + set_random_seed(seed) + sweep_runner = HyperparameterSweep(project_name=wandb_project_name, sweep_config=sweep_config) + sweep_runner.run_sweep() + + return sweep_runner + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + parser.add_argument("--sweep_config", type=str, required=True) + args = parser.parse_args() + + sweep_config = util.load_sweep_config(args.sweep_config) + start_sweep(sweep_config) + diff --git a/scripts/models.py b/scripts/models.py new file mode 100644 index 0000000..bcb99dd --- /dev/null +++ b/scripts/models.py @@ -0,0 +1,1138 @@ +""" +This file contains the model specifications. +""" + +import seisbench.models as sbm +import seisbench.generate as sbg + +import pytorch_lightning as pl +import torch +import torch.nn.functional as F +import numpy as np +from abc import abstractmethod, ABC + +# Allows to import this file in both jupyter notebook and code +try: + from .augmentations import DuplicateEvent +except ImportError: + from augmentations import DuplicateEvent + + +# Phase dict for labelling. We only study P and S phases without differentiating between them. 
+phase_dict = { + "trace_p_arrival_sample": "P", + "trace_pP_arrival_sample": "P", + "trace_P_arrival_sample": "P", + "trace_P1_arrival_sample": "P", + "trace_Pg_arrival_sample": "P", + "trace_Pn_arrival_sample": "P", + "trace_PmP_arrival_sample": "P", + "trace_pwP_arrival_sample": "P", + "trace_pwPm_arrival_sample": "P", + "trace_s_arrival_sample": "S", + "trace_S_arrival_sample": "S", + "trace_S1_arrival_sample": "S", + "trace_Sg_arrival_sample": "S", + "trace_SmS_arrival_sample": "S", + "trace_Sn_arrival_sample": "S", +} + + +def vector_cross_entropy(y_pred, y_true, eps=1e-5): + """ + Cross entropy loss + + :param y_true: True label probabilities + :param y_pred: Predicted label probabilities + :param eps: Epsilon to clip values for stability + :return: Average loss across batch + """ + h = y_true * torch.log(y_pred + eps) + if y_pred.ndim == 3: + h = h.mean(-1).sum( + -1 + ) # Mean along sample dimension and sum along pick dimension + else: + h = h.sum(-1) # Sum along pick dimension + h = h.mean() # Mean over batch axis + return -h + + +class SeisBenchModuleLit(pl.LightningModule, ABC): + """ + Abstract interface for SeisBench lightning modules. + Adds generic function, e.g., get_augmentations + """ + + @abstractmethod + def get_augmentations(self): + """ + Returns a list of augmentations that can be passed to the seisbench.generate.GenericGenerator + + :return: List of augmentations + """ + pass + + def get_train_augmentations(self): + """ + Returns the set of training augmentations. + """ + return self.get_augmentations() + + def get_val_augmentations(self): + """ + Returns the set of validation augmentations for validations during training. + """ + return self.get_augmentations() + + @abstractmethod + def get_eval_augmentations(self): + """ + Returns the set of evaluation augmentations for evaluation after training. + These augmentations will be passed to a SteeredGenerator and should usually contain a steered window. + """ + pass + + @abstractmethod + def predict_step(self, batch, batch_idx=None, dataloader_idx=None): + """ + Predict step for the lightning module. Returns results for three tasks: + + - earthquake detection (score, higher means more likely detection) + - P to S phase discrimination (score, high means P, low means S) + - phase location in samples (two integers, first for P, second for S wave) + + All predictions should only take the window defined by batch["window_borders"] into account. + + :param batch: + :return: + """ + score_detection = None + score_p_or_s = None + p_sample = None + s_sample = None + return score_detection, score_p_or_s, p_sample, s_sample + + +class PhaseNetLit(SeisBenchModuleLit): + """ + LightningModule for PhaseNet + + :param lr: Learning rate, defaults to 1e-2 + :param sigma: Standard deviation passed to the ProbabilisticPickLabeller + :param sample_boundaries: Low and high boundaries for the RandomWindow selection. + :param kwargs: Kwargs are passed to the SeisBench.models.PhaseNet constructor. 
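+
+    Minimal usage sketch (``dataset`` stands for any SeisBench waveform dataset and is
+    not defined in this module)::
+
+        import seisbench.generate as sbg
+
+        lit_model = PhaseNetLit(lr=1e-2, sigma=20)
+        gen = sbg.GenericGenerator(dataset.train())
+        gen.add_augmentations(lit_model.get_train_augmentations())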
+ """ + + def __init__(self, lr=1e-2, sigma=20, sample_boundaries=(None, None), **kwargs): + super().__init__() + self.save_hyperparameters() + self.lr = lr + self.sigma = sigma + self.sample_boundaries = sample_boundaries + self.loss = vector_cross_entropy + self.model = sbm.PhaseNet(phases="PN", **kwargs) + + def forward(self, x): + return self.model(x) + + def shared_step(self, batch): + x = batch["X"] + y_true = batch["y"] + y_pred = self.model(x) + return self.loss(y_pred, y_true) + + def training_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("train_loss", loss) + return loss + + def validation_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("val_loss", loss) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + def get_augmentations(self): + return [ + # In 2/3 of the cases, select windows around picks, to reduce amount of noise traces in training. + # Uses strategy variable, as padding will be handled by the random window. + # In 1/3 of the cases, just returns the original trace, to keep diversity high. + sbg.OneOf( + [ + sbg.WindowAroundSample( + list(phase_dict.keys()), + samples_before=3000, + windowlen=6000, + selection="random", + strategy="variable", + ), + sbg.NullAugmentation(), + ], + probabilities=[2, 1], + ), + sbg.RandomWindow( + low=self.sample_boundaries[0], + high=self.sample_boundaries[1], + windowlen=3001, + strategy="pad", + ), + sbg.ChangeDtype(np.float32), + sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + sbg.ProbabilisticLabeller( + label_columns=phase_dict, sigma=self.sigma, dim=0 + ), + ] + + def get_eval_augmentations(self): + return [ + sbg.SteeredWindow(windowlen=3001, strategy="pad"), + sbg.ChangeDtype(np.float32), + sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + ] + + def predict_step(self, batch, batch_idx=None, dataloader_idx=None): + x = batch["X"] + window_borders = batch["window_borders"] + + pred = self.model(x) + + score_detection = torch.zeros(pred.shape[0]) + score_p_or_s = torch.zeros(pred.shape[0]) + p_sample = torch.zeros(pred.shape[0], dtype=int) + s_sample = torch.zeros(pred.shape[0], dtype=int) + + for i in range(pred.shape[0]): + start_sample, end_sample = window_borders[i] + local_pred = pred[i, :, start_sample:end_sample] + + score_detection[i] = torch.max(1 - local_pred[-1]) # 1 - noise + score_p_or_s[i] = torch.max(local_pred[0]) / torch.max( + local_pred[1] + ) # most likely P by most likely S + + p_sample[i] = torch.argmax(local_pred[0]) + s_sample[i] = torch.argmax(local_pred[1]) + + return score_detection, score_p_or_s, p_sample, s_sample + + +class GPDLit(SeisBenchModuleLit): + """ + LightningModule for GPD + + :param lr: Learning rate, defaults to 1e-3 + :param sigma: Standard deviation passed to the ProbabilisticPickLabeller. If not, uses determinisic labels, + i.e., whether a pick is contained. + :param highpass: If not None, cutoff frequency for highpass filter in Hz. + :param lowpass: If not None, cutoff frequency for lowpass filter in Hz. + :param kwargs: Kwargs are passed to the SeisBench.models.GPD constructor. 
+ """ + + def __init__(self, lr=1e-3, highpass=None, lowpass=None, sigma=None, **kwargs): + super().__init__() + self.save_hyperparameters() + self.lr = lr + self.sigma = sigma + self.model = sbm.GPD(**kwargs) + if sigma is None: + self.nllloss = torch.nn.NLLLoss() + self.loss = self.nll_with_probabilities + else: + self.loss = vector_cross_entropy + self.highpass = highpass + self.lowpass = lowpass + self.predict_stride = 5 + + def nll_with_probabilities(self, y_pred, y_true): + y_pred = torch.log(y_pred) + return self.nllloss(y_pred, y_true) + + def forward(self, x): + return self.model(x) + + def shared_step(self, batch): + x = batch["X"] + y_true = batch["y"].squeeze() + y_pred = self.model(x) + return self.loss(y_pred, y_true) + + def training_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("train_loss", loss) + return loss + + def validation_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("val_loss", loss) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + def get_augmentations(self): + filter = [] + if self.highpass is not None: + filter = [sbg.Filter(1, self.highpass, "highpass")] + if self.lowpass is not None: + filter += [sbg.Filter(1, self.lowpass, "lowpass")] + + if self.sigma is None: + labeller = sbg.StandardLabeller( + label_columns=phase_dict, + on_overlap="fixed-relevance", + low=100, + high=-100, + ) + else: + labeller = sbg.ProbabilisticPointLabeller( + label_columns=phase_dict, position=0.5, sigma=self.sigma + ) + + return ( + [ + # In 2/3 of the cases, select windows around picks, to reduce amount of noise traces in training. + # Uses strategy variable, as padding will be handled by the random window. + # In 1/3 of the cases, just returns the original trace, to keep diversity high. 
+ sbg.OneOf( + [ + sbg.WindowAroundSample( + list(phase_dict.keys()), + samples_before=400, + windowlen=800, + selection="random", + strategy="variable", + ), + sbg.NullAugmentation(), + ], + probabilities=[2, 1], + ), + sbg.RandomWindow( + windowlen=400, + strategy="pad", + ), + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + labeller, + ] + + filter + + [sbg.ChangeDtype(np.float32)] + ) + + def get_eval_augmentations(self): + filter = [] + if self.highpass is not None: + filter = [sbg.Filter(1, self.highpass, "highpass")] + if self.lowpass is not None: + filter += [sbg.Filter(1, self.lowpass, "lowpass")] + + return [ + # Larger window length ensures a sliding window covering full trace can be applied + sbg.SteeredWindow(windowlen=3400, strategy="pad"), + sbg.SlidingWindow(timestep=self.predict_stride, windowlen=400), + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + *filter, + sbg.ChangeDtype(np.float32), + ] + + def predict_step(self, batch, batch_idx=None, dataloader_idx=None): + x = batch["X"] + window_borders = batch["window_borders"] + + shape_save = x.shape + x = x.reshape( + (-1,) + shape_save[2:] + ) # Merge batch and sliding window dimensions + pred = self.model(x) + pred = pred.reshape(shape_save[:2] + (-1,)) + pred = torch.repeat_interleave( + pred, self.predict_stride, dim=1 + ) # Counteract stride + pred = F.pad(pred, (0, 0, 200, 200)) + pred = pred.permute(0, 2, 1) + + # Otherwise windows shorter 30 s will automatically produce detections + pred[:, 2, :200] = 1 + pred[:, 2, -200:] = 1 + + score_detection = torch.zeros(pred.shape[0]) + score_p_or_s = torch.zeros(pred.shape[0]) + p_sample = torch.zeros(pred.shape[0], dtype=int) + s_sample = torch.zeros(pred.shape[0], dtype=int) + + for i in range(pred.shape[0]): + start_sample, end_sample = window_borders[i] + local_pred = pred[i, :, start_sample:end_sample] + + score_detection[i] = torch.max(1 - local_pred[-1]) # 1 - noise + score_p_or_s[i] = torch.max(local_pred[0]) / torch.max( + local_pred[1] + ) # most likely P by most likely S + + # Adjust for prediction stride by choosing the sample in the middle of each block + p_sample[i] = torch.argmax(local_pred[0]) + self.predict_stride // 2 + s_sample[i] = torch.argmax(local_pred[1]) + self.predict_stride // 2 + + return score_detection, score_p_or_s, p_sample, s_sample + + def pick_mae(self, batch, batch_idx): + x = batch["X"] + window_borders = batch["window_borders"] + + shape_save = x.shape + x = x.reshape( + (-1,) + shape_save[2:] + ) # Merge batch and sliding window dimensions + pred = self.model(x) + pred = pred.reshape(shape_save[:2] + (-1,)) + pred = torch.repeat_interleave( + pred, self.predict_stride, dim=1 + ) # Counteract stride + pred = F.pad(pred, (0, 0, 200, 200)) + pred = pred.permute(0, 2, 1) + + # Otherwise windows shorter 30 s will automatically produce detections + pred[:, 2, :200] = 1 + pred[:, 2, -200:] = 1 + + score_detection = torch.zeros(pred.shape[0]) + score_p_or_s = torch.zeros(pred.shape[0]) + p_sample = torch.zeros(pred.shape[0], dtype=int) + s_sample = torch.zeros(pred.shape[0], dtype=int) + + for i in range(pred.shape[0]): + start_sample, end_sample = window_borders[i] + local_pred = pred[i, :, start_sample:end_sample] + + score_detection[i] = torch.max(1 - local_pred[-1]) # 1 - noise + score_p_or_s[i] = torch.max(local_pred[0]) / torch.max( + local_pred[1] + ) # most likely P by most likely S + + # Adjust for prediction stride by choosing the sample in the middle of each block + p_sample[i] = 
torch.argmax(local_pred[0]) + self.predict_stride // 2 + s_sample[i] = torch.argmax(local_pred[1]) + self.predict_stride // 2 + + return score_detection, score_p_or_s, p_sample, s_sample + + +class EQTransformerLit(SeisBenchModuleLit): + """ + LightningModule for EQTransformer + + :param lr: Learning rate, defaults to 1e-2 + :param sigma: Standard deviation passed to the ProbabilisticPickLabeller + :param sample_boundaries: Low and high boundaries for the RandomWindow selection. + :param loss_weights: Loss weights for detection, P and S phase. + :param rotate_array: If true, rotate array along sample axis. + :param detection_fixed_window: Passed as parameter fixed_window to detection + :param kwargs: Kwargs are passed to the SeisBench.models.EQTransformer constructor. + """ + + def __init__( + self, + lr=1e-2, + sigma=20, + sample_boundaries=(None, None), + loss_weights=(0.05, 0.40, 0.55), + rotate_array=False, + detection_fixed_window=None, + **kwargs, + ): + super().__init__() + self.save_hyperparameters() + self.lr = lr + self.sigma = sigma + self.sample_boundaries = sample_boundaries + self.loss = torch.nn.BCELoss() + self.loss_weights = loss_weights + self.rotate_array = rotate_array + self.detection_fixed_window = detection_fixed_window + self.model = sbm.EQTransformer(**kwargs) + + def forward(self, x): + return self.model(x) + + def shared_step(self, batch): + x = batch["X"] + p_true = batch["y"][:, 0] + s_true = batch["y"][:, 1] + det_true = batch["detections"][:, 0] + det_pred, p_pred, s_pred = self.model(x) + + return ( + self.loss_weights[0] * self.loss(det_pred, det_true) + + self.loss_weights[1] * self.loss(p_pred, p_true) + + self.loss_weights[2] * self.loss(s_pred, s_true) + ) + + def training_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("train_loss", loss) + return loss + + def validation_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("val_loss", loss) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + def get_joint_augmentations(self): + p_phases = [key for key, val in phase_dict.items() if val == "P"] + s_phases = [key for key, val in phase_dict.items() if val == "S"] + + if self.detection_fixed_window is not None: + detection_labeller = sbg.DetectionLabeller( + p_phases, + fixed_window=self.detection_fixed_window, + key=("X", "detections"), + ) + else: + detection_labeller = sbg.DetectionLabeller( + p_phases, s_phases=s_phases, key=("X", "detections") + ) + + block1 = [ + # In 2/3 of the cases, select windows around picks, to reduce amount of noise traces in training. + # Uses strategy variable, as padding will be handled by the random window. + # In 1/3 of the cases, just returns the original trace, to keep diversity high. 
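+            # block1 covers windowing, labelling and normalization; block2 only casts
+            # dtypes. get_train_augmentations inserts the random augmentation block
+            # between the two.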
+ sbg.OneOf( + [ + sbg.WindowAroundSample( + list(phase_dict.keys()), + samples_before=6000, + windowlen=12000, + selection="random", + strategy="variable", + ), + sbg.NullAugmentation(), + ], + probabilities=[2, 1], + ), + sbg.RandomWindow( + low=self.sample_boundaries[0], + high=self.sample_boundaries[1], + windowlen=6000, + strategy="pad", + ), + sbg.ProbabilisticLabeller( + label_columns=phase_dict, sigma=self.sigma, dim=0 + ), + detection_labeller, + # Normalize to ensure correct augmentation behavior + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + ] + + block2 = [ + sbg.ChangeDtype(np.float32, "X"), + sbg.ChangeDtype(np.float32, "y"), + sbg.ChangeDtype(np.float32, "detections"), + ] + + return block1, block2 + + def get_train_augmentations(self): + if self.rotate_array: + rotation_block = [ + sbg.OneOf( + [ + sbg.RandomArrayRotation(["X", "y", "detections"]), + sbg.NullAugmentation(), + ], + [0.99, 0.01], + ) + ] + else: + rotation_block = [] + + augmentation_block = [ + # Add secondary event + sbg.OneOf( + [DuplicateEvent(label_keys="y"), sbg.NullAugmentation()], + probabilities=[0.3, 0.7], + ), + # Gaussian noise + sbg.OneOf([sbg.GaussianNoise(), sbg.NullAugmentation()], [0.5, 0.5]), + # Array rotation + *rotation_block, + # Gaps + sbg.OneOf([sbg.AddGap(), sbg.NullAugmentation()], [0.2, 0.8]), + # Channel dropout + sbg.OneOf([sbg.ChannelDropout(), sbg.NullAugmentation()], [0.3, 0.7]), + # Augmentations make second normalize necessary + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + ] + + block1, block2 = self.get_joint_augmentations() + + return block1 + augmentation_block + block2 + + def get_val_augmentations(self): + block1, block2 = self.get_joint_augmentations() + + return block1 + block2 + + def get_augmentations(self): + raise NotImplementedError("Use get_train/val_augmentations instead.") + + def get_eval_augmentations(self): + return [ + sbg.SteeredWindow(windowlen=6000, strategy="pad"), + sbg.ChangeDtype(np.float32), + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + ] + + def predict_step(self, batch, batch_idx=None, dataloader_idx=None): + x = batch["X"] + window_borders = batch["window_borders"] + + det_pred, p_pred, s_pred = self.model(x) + + score_detection = torch.zeros(det_pred.shape[0]) + score_p_or_s = torch.zeros(det_pred.shape[0]) + p_sample = torch.zeros(det_pred.shape[0], dtype=int) + s_sample = torch.zeros(det_pred.shape[0], dtype=int) + + for i in range(det_pred.shape[0]): + start_sample, end_sample = window_borders[i] + local_det_pred = det_pred[i, start_sample:end_sample] + local_p_pred = p_pred[i, start_sample:end_sample] + local_s_pred = s_pred[i, start_sample:end_sample] + + score_detection[i] = torch.max(local_det_pred) + score_p_or_s[i] = torch.max(local_p_pred) / torch.max( + local_s_pred + ) # most likely P by most likely S + + p_sample[i] = torch.argmax(local_p_pred) + s_sample[i] = torch.argmax(local_s_pred) + + return score_detection, score_p_or_s, p_sample, s_sample + + +class CREDLit(SeisBenchModuleLit): + """ + LightningModule for CRED + + :param lr: Learning rate, defaults to 1e-2 + :param sample_boundaries: Low and high boundaries for the RandomWindow selection. + :param kwargs: Kwargs are passed to the SeisBench.models.CRED constructor. 
+ """ + + def __init__( + self, + lr=1e-2, + sample_boundaries=(None, None), + detection_fixed_window=None, + **kwargs, + ): + super().__init__() + self.save_hyperparameters() + self.lr = lr + self.sample_boundaries = sample_boundaries + self.detection_fixed_window = detection_fixed_window + self.loss = torch.nn.BCELoss() + self.model = sbm.CRED(**kwargs) + + def forward(self, x): + return self.model(x) + + def shared_step(self, batch): + x = batch["spec"] + y_true = batch["y"][:, 0] + y_pred = self.model(x)[:, :, 0] + + return self.loss(y_pred, y_true) + + def training_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("train_loss", loss) + return loss + + def validation_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("val_loss", loss) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + def get_augmentations(self): + p_phases = [key for key, val in phase_dict.items() if val == "P"] + s_phases = [key for key, val in phase_dict.items() if val == "S"] + + def spectrogram(state_dict): + x, metadata = state_dict["X"] + spec = self.model.waveforms_to_spectrogram(x) + state_dict["spec"] = (spec, metadata) + + def resample_detections(state_dict): + # Resample detections to 19 samples as in the output of CRED + # Each sample represents the average over 158 original samples + y, metadata = state_dict["y"] + y = np.pad(y, [(0, 0), (0, 2)], mode="constant", constant_values=0) + y = np.reshape(y, (1, 19, 158)) + y = np.mean(y, axis=-1) + state_dict["y"] = (y, metadata) + + if self.detection_fixed_window is not None: + detection_labeller = sbg.DetectionLabeller( + p_phases, fixed_window=self.detection_fixed_window + ) + else: + detection_labeller = sbg.DetectionLabeller(p_phases, s_phases=s_phases) + + augmentations = [ + # In 2/3 of the cases, select windows around picks, to reduce amount of noise traces in training. + # Uses strategy variable, as padding will be handled by the random window. + # In 1/3 of the cases, just returns the original trace, to keep diversity high. 
+ sbg.OneOf( + [ + sbg.WindowAroundSample( + list(phase_dict.keys()), + samples_before=3000, + windowlen=6000, + selection="random", + strategy="variable", + ), + sbg.NullAugmentation(), + ], + probabilities=[2, 1], + ), + sbg.RandomWindow( + low=self.sample_boundaries[0], + high=self.sample_boundaries[1], + windowlen=3000, + strategy="pad", + ), + detection_labeller, + # Normalize to ensure correct augmentation behavior + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + spectrogram, + resample_detections, + sbg.ChangeDtype(np.float32, "y"), + sbg.ChangeDtype(np.float32, "spec"), + ] + + return augmentations + + def get_eval_augmentations(self): + def spectrogram(state_dict): + x, metadata = state_dict["X"] + spec = self.model.waveforms_to_spectrogram(x) + state_dict["spec"] = (spec, metadata) + + return [ + sbg.SteeredWindow(windowlen=3000, strategy="pad"), + sbg.Normalize(detrend_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), + spectrogram, + sbg.ChangeDtype(np.float32, "spec"), + ] + + def predict_step(self, batch, batch_idx=None, dataloader_idx=None): + x = batch["spec"] + window_borders = batch["window_borders"] + + pred = self.model(x) + + score_detection = torch.zeros(pred.shape[0]) + score_p_or_s = torch.zeros(pred.shape[0]) * np.nan + p_sample = torch.zeros(pred.shape[0], dtype=int) * np.nan + s_sample = torch.zeros(pred.shape[0], dtype=int) * np.nan + + for i in range(pred.shape[0]): + start_sample, end_sample = window_borders[i] + # We go for a slightly broader window, i.e., all output prediction points encompassing the target window + start_resampled = start_sample // 158 + end_resampled = end_sample // 158 + 1 + local_pred = pred[i, start_resampled:end_resampled, 0] + + score_detection[i] = torch.max(local_pred) # 1 - noise + + return score_detection, score_p_or_s, p_sample, s_sample + + +class BasicPhaseAELit(SeisBenchModuleLit): + """ + LightningModule for BasicPhaseAE + + :param lr: Learning rate, defaults to 1e-2 + :param sigma: Standard deviation passed to the ProbabilisticPickLabeller + :param sample_boundaries: Low and high boundaries for the RandomWindow selection. + :param kwargs: Kwargs are passed to the SeisBench.models.BasicPhaseAE constructor. + """ + + def __init__(self, lr=1e-2, sigma=20, sample_boundaries=(None, None), **kwargs): + super().__init__() + self.save_hyperparameters() + self.lr = lr + self.sigma = sigma + self.sample_boundaries = sample_boundaries + self.loss = vector_cross_entropy + self.model = sbm.BasicPhaseAE(**kwargs) + + def forward(self, x): + return self.model(x) + + def shared_step(self, batch): + x = batch["X"] + y_true = batch["y"] + y_pred = self.model(x) + return self.loss(y_pred, y_true) + + def training_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("train_loss", loss) + return loss + + def validation_step(self, batch, batch_idx): + loss = self.shared_step(batch) + self.log("val_loss", loss) + return loss + + def configure_optimizers(self): + optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) + return optimizer + + def get_augmentations(self): + return [ + # In 2/3 of the cases, select windows around picks, to reduce amount of noise traces in training. + # Uses strategy variable, as padding will be handled by the random window. + # In 1/3 of the cases, just returns the original trace, to keep diversity high. 
+            sbg.OneOf(
+                [
+                    sbg.WindowAroundSample(
+                        list(phase_dict.keys()),
+                        samples_before=700,
+                        windowlen=700,
+                        selection="random",
+                        strategy="variable",
+                    ),
+                    sbg.NullAugmentation(),
+                ],
+                probabilities=[2, 1],
+            ),
+            sbg.RandomWindow(
+                low=self.sample_boundaries[0],
+                high=self.sample_boundaries[1],
+                windowlen=600,
+                strategy="pad",
+            ),
+            sbg.ChangeDtype(np.float32),
+            sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"),
+            sbg.ProbabilisticLabeller(
+                label_columns=phase_dict, sigma=self.sigma, dim=0
+            ),
+        ]
+
+    def get_eval_augmentations(self):
+        return [
+            sbg.SteeredWindow(windowlen=3000, strategy="pad"),
+            sbg.ChangeDtype(np.float32),
+            sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"),
+        ]
+
+    def predict_step(self, batch, batch_idx=None, dataloader_idx=None):
+        x = batch["X"]
+        window_borders = batch["window_borders"]
+
+        # Create overlapping windows
+        re = torch.zeros(x.shape[:2] + (7, 600), dtype=x.dtype, device=x.device)
+        for i, start in enumerate(range(0, 2401, 400)):
+            re[:, :, i] = x[:, :, start : start + 600]
+        x = re
+
+        x = x.permute(0, 2, 1, 3)  # --> (batch, windows, channels, samples)
+        shape_save = x.shape
+        x = x.reshape(-1, 3, 600)  # --> (batch * windows, channels, samples)
+        window_pred = self.model(x)
+        window_pred = window_pred.reshape(
+            shape_save[:2] + (3, 600)
+        )  # --> (batch, window, channels, samples)
+
+        # Connect predictions again, ignoring first and last second of each prediction
+        pred = torch.zeros((window_pred.shape[0], window_pred.shape[2], 3000))
+        for i, start in enumerate(range(0, 2401, 400)):
+            if start == 0:
+                # Use full window (for start==0, the end will be overwritten)
+                pred[:, :, start : start + 600] = window_pred[:, i]
+            else:
+                pred[:, :, start + 100 : start + 600] = window_pred[:, i, :, 100:]
+
+        score_detection = torch.zeros(pred.shape[0])
+        score_p_or_s = torch.zeros(pred.shape[0])
+        p_sample = torch.zeros(pred.shape[0], dtype=int)
+        s_sample = torch.zeros(pred.shape[0], dtype=int)
+
+        for i in range(pred.shape[0]):
+            start_sample, end_sample = window_borders[i]
+            local_pred = pred[i, :, start_sample:end_sample]
+
+            score_detection[i] = torch.max(1 - local_pred[-1])  # 1 - noise
+            score_p_or_s[i] = torch.max(local_pred[0]) / torch.max(
+                local_pred[1]
+            )  # most likely P by most likely S
+
+            p_sample[i] = torch.argmax(local_pred[0])
+            s_sample[i] = torch.argmax(local_pred[1])
+
+        return score_detection, score_p_or_s, p_sample, s_sample
+
+
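The overlapping-window reassembly in `BasicPhaseAELit.predict_step` above is easy to get wrong, so here is a minimal, self-contained sketch of the same indexing, using dummy tensors instead of a real `sbm.BasicPhaseAE`:

```python
import torch

# Seven 600-sample windows hopped by 400 tile a 3000-sample trace:
# starts are 0, 400, ..., 2400 and the last window ends at 2400 + 600 = 3000.
starts = list(range(0, 2401, 400))
assert len(starts) == 7 and starts[-1] + 600 == 3000

# Dummy per-window predictions: (batch, windows, channels, samples)
window_pred = torch.rand(2, 7, 3, 600)

pred = torch.zeros(2, 3, 3000)
for i, start in enumerate(starts):
    if start == 0:
        pred[:, :, :600] = window_pred[:, 0]
    else:
        # Drop the first 100 samples (1 s at the 100 Hz used here) of every
        # later window; each write also overwrites the previous window's last
        # 100 samples, so both edges of every window are avoided.
        pred[:, :, start + 100 : start + 600] = window_pred[:, i, :, 100:]
```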
+class DPPDetectorLit(SeisBenchModuleLit):
+    """
+    LightningModule for DPPDetector
+
+    :param lr: Learning rate, defaults to 1e-3
+    :param kwargs: Kwargs are passed to the SeisBench.models.DPPDetector constructor.
+    """
+
+    def __init__(self, lr=1e-3, **kwargs):
+        super().__init__()
+        self.save_hyperparameters()
+        self.lr = lr
+        self.nllloss = torch.nn.NLLLoss()
+        self.loss = self.nll_with_probabilities
+        self.model = sbm.DPPDetector(**kwargs)
+
+    def forward(self, x):
+        return self.model(x)
+
+    def nll_with_probabilities(self, y_pred, y_true):
+        y_pred = torch.log(y_pred + 1e-5)
+        return self.nllloss(y_pred, y_true)
+
+    def shared_step(self, batch):
+        x = batch["X"]
+        y_true = batch["y"].squeeze()
+        y_pred = self.model(x)
+        return self.loss(y_pred, y_true)
+
+    def training_step(self, batch, batch_idx):
+        loss = self.shared_step(batch)
+        self.log("train_loss", loss)
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        loss = self.shared_step(batch)
+        self.log("val_loss", loss)
+        return loss
+
+    def configure_optimizers(self):
+        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
+        return optimizer
+
+    def get_augmentations(self):
+        return [
+            # In 2/3 of the cases, select windows around picks, to reduce amount of noise traces in training.
+            # Uses strategy variable, as padding will be handled by the random window.
+            # In 1/3 of the cases, just returns the original trace, to keep diversity high.
+            sbg.OneOf(
+                [
+                    sbg.WindowAroundSample(
+                        list(phase_dict.keys()),
+                        samples_before=650,
+                        windowlen=1300,
+                        selection="random",
+                        strategy="variable",
+                    ),
+                    sbg.NullAugmentation(),
+                ],
+                probabilities=[2, 1],
+            ),
+            sbg.RandomWindow(
+                windowlen=500,
+                strategy="pad",
+            ),
+            sbg.ChangeDtype(np.float32),
+            sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"),
+            sbg.StandardLabeller(
+                label_columns=phase_dict, on_overlap="fixed-relevance"
+            ),
+        ]
+
+    def get_eval_augmentations(self):
+        return [
+            sbg.SteeredWindow(windowlen=3000, strategy="pad"),
+            sbg.ChangeDtype(np.float32),
+            sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"),
+        ]
+
+    def predict_step(self, batch, batch_idx=None, dataloader_idx=None):
+        x = batch["X"]
+        window_borders = batch["window_borders"]
+
+        # Create windows
+        x = x.reshape(x.shape[:-1] + (6, 500))  # Split into 6 windows of length 500
+        x = x.permute(0, 2, 1, 3)  # --> (batch, windows, channels, samples)
+        shape_save = x.shape
+        x = x.reshape(-1, 3, 500)  # --> (batch * windows, channels, samples)
+        pred = self.model(x)
+        pred = pred.reshape(shape_save[:2] + (-1,))  # --> (batch, windows, label)
+
+        score_detection = torch.zeros(pred.shape[0])
+        score_p_or_s = torch.zeros(pred.shape[0])
+        p_sample = torch.zeros(pred.shape[0], dtype=int) * np.nan
+        s_sample = torch.zeros(pred.shape[0], dtype=int) * np.nan
+
+        for i in range(pred.shape[0]):
+            start_sample, end_sample = window_borders[i]
+            start_resampled = start_sample.cpu() // 500
+            end_resampled = int(np.ceil(end_sample.cpu() / 500))
+            local_pred = pred[i, start_resampled:end_resampled, :]
+
+            score_p_or_s[i] = torch.max(local_pred[:, 0]) / torch.max(local_pred[:, 1])
+            score_detection[i] = torch.max(1 - local_pred[:, -1])
+
+        return score_detection, score_p_or_s, p_sample, s_sample
+
+
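`nll_with_probabilities` above exists because the detector returns per-class probabilities, while `torch.nn.NLLLoss` expects log-probabilities; the `1e-5` keeps the logarithm finite when a probability is exactly zero. A quick numerical check, assuming probability outputs of shape (batch, classes):

```python
import torch

probs = torch.tensor([[0.7, 0.2, 0.1],
                      [0.1, 0.3, 0.6]])
labels = torch.tensor([0, 2])

nll = torch.nn.NLLLoss()
loss = nll(torch.log(probs + 1e-5), labels)

# Negative mean log-probability of the true class, up to the 1e-5 stabilizer
manual = -torch.log(probs[torch.arange(2), labels] + 1e-5).mean()
assert torch.isclose(loss, manual)
```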
+class DPPPickerLit(SeisBenchModuleLit):
+    """
+    LightningModule for DPPPicker
+
+    :param mode: Phase to pick, either "P" or "S"
+    :param lr: Learning rate, defaults to 1e-3
+    :param kwargs: Kwargs are passed to the SeisBench.models.DPPPicker constructor.
+    """
+
+    def __init__(self, mode, lr=1e-3, **kwargs):
+        super().__init__()
+        self.save_hyperparameters()
+        self.mode = mode
+        self.lr = lr
+        self.loss = torch.nn.BCELoss()
+        print(mode, lr, kwargs)
+        self.model = sbm.DPPPicker(mode=mode, **kwargs)
+
+    def forward(self, x):
+        if self.mode == "P":
+            x = x[:, 0:1]  # Select vertical component
+        elif self.mode == "S":
+            x = x[:, 1:3]  # Select horizontal components
+        else:
+            raise ValueError(f"Unknown mode {self.mode}")
+
+        return self.model(x)
+
+    def shared_step(self, batch):
+        x = batch["X"]
+        y_true = batch["y"]
+
+        if self.mode == "P":
+            y_true = y_true[:, 0]  # P wave
+            x = x[:, 0:1]  # Select vertical component
+        elif self.mode == "S":
+            y_true = y_true[:, 1]  # S wave
+            x = x[:, 1:3]  # Select horizontal components
+        else:
+            raise ValueError(f"Unknown mode {self.mode}")
+
+        y_pred = self.model(x)
+
+        loss = self.loss(y_pred, y_true)
+        return loss
+
+    def training_step(self, batch, batch_idx):
+        loss = self.shared_step(batch)
+        self.log("train_loss", loss)
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        loss = self.shared_step(batch)
+        self.log("val_loss", loss)
+        return loss
+
+    def configure_optimizers(self):
+        optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
+        return optimizer
+
+    def get_augmentations(self):
+        return [
+            sbg.WindowAroundSample(
+                [key for key, val in phase_dict.items() if val == self.mode],
+                samples_before=1000,
+                windowlen=2000,
+                selection="random",
+                strategy="variable",
+            ),
+            sbg.RandomWindow(
+                windowlen=1000,
+                strategy="pad",
+            ),
+            sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"),
+            sbg.StepLabeller(label_columns=phase_dict),
+            sbg.ChangeDtype(np.float32),
+            sbg.ChangeDtype(np.float32, "y"),
+        ]
+
+    def get_eval_augmentations(self):
+        return [
+            sbg.SteeredWindow(windowlen=1000, strategy="pad"),
+            sbg.ChangeDtype(np.float32),
+            sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"),
+        ]
+
+    def predict_step(self, batch, batch_idx=None, dataloader_idx=None):
+        x = batch["X"]
+        window_borders = batch["window_borders"]
+
+        pred = self(x)
+
+        score_detection = torch.zeros(pred.shape[0]) * np.nan
+        score_p_or_s = torch.zeros(pred.shape[0]) * np.nan
+        p_sample = torch.zeros(pred.shape[0], dtype=int) * np.nan
+        s_sample = torch.zeros(pred.shape[0], dtype=int) * np.nan
+
+        for i in range(pred.shape[0]):
+            start_sample, end_sample = window_borders[i]
+            local_pred = pred[i, start_sample:end_sample]
+
+            if (local_pred > 0.5).any():
+                sample = torch.argmax(
+                    (local_pred > 0.5).float()
+                )  # First sample exceeding 0.5
+            else:
+                sample = 500  # Simply guess the middle
+
+            if self.mode == "P":
+                p_sample[i] = sample
+            elif self.mode == "S":
+                s_sample[i] = sample
+            else:
+                raise ValueError(f"Unknown mode {self.mode}")
+
+        return score_detection, score_p_or_s, p_sample, s_sample
+
+
+def get_model_specific_args(config):
+    """
+    Extracts the model-specific constructor arguments from a sweep/run config.
+
+    :param config: Configuration object with model_name and hyperparameters
+    :return: Dictionary of arguments for the matching lightning module
+    """
+    model = config.model_name[0]
+    lr = config.learning_rate
+    if isinstance(lr, list):
+        lr = lr[0]
+
+    args = {'lr': lr}
+
+    match model:
+        case 'GPD':
+            if 'highpass' in config:
+                args['highpass'] = config.highpass
+            if 'lowpass' in config:
+                args['lowpass'] = config.lowpass[0]
+        case 'PhaseNet':
+            if 'sample_boundaries' in config:
+                args['sample_boundaries'] = config.sample_boundaries
+        case 'DPPPicker':
+            if 'mode' in config:
+                args['mode'] = config.mode[0]
+
+    print("args", args, model)
+
+    return args
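`get_model_specific_args` is written against `wandb.config`-style objects, where swept hyperparameters typically arrive as single-element lists. A hypothetical call for illustration (the `DotDict` stand-in below is not part of the codebase):

```python
class DotDict(dict):
    """Minimal stand-in for wandb.config: attribute access plus `in` checks."""
    __getattr__ = dict.__getitem__

config = DotDict(
    model_name=["GPD"],
    learning_rate=[1e-3],
    highpass=1.0,    # read directly, unlike lowpass
    lowpass=[10.0],  # only the first element is used
)

assert get_model_specific_args(config) == {"lr": 1e-3, "highpass": 1.0, "lowpass": 10.0}
```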
diff --git a/scripts/pipeline.py b/scripts/pipeline.py
new file mode 100644
index 0000000..30a4679
--- /dev/null
+++ b/scripts/pipeline.py
@@ -0,0 +1,92 @@
+"""
+-----------------
+Copyright © 2023 ACK Cyfronet AGH, Poland.
+This work was partially funded by EPOS Project funded in frame of PL-POIR4.2
+-----------------
+
+This script runs the pipeline for the training and evaluation of the models.
+"""
+import logging
+import time
+import argparse
+
+import util
+import generate_eval_targets
+import hyperparameter_sweep
+import eval
+import collect_results
+from config_loader import data_path, targets_path, sampling_rate, dataset_name, sweep_files
+
+logger = logging.getLogger('pipeline')
+logger.setLevel(logging.INFO)
+
+
+def load_sweep_config(model_name, args):
+
+    if model_name == "PhaseNet" and args.phasenet_config is not None:
+        sweep_fname = args.phasenet_config
+    elif model_name == "GPD" and args.gpd_config is not None:
+        sweep_fname = args.gpd_config
+    else:
+        # use the default sweep config for the model
+        sweep_fname = sweep_files[model_name]
+
+    logger.info(f"Loading sweep config: {sweep_fname}")
+
+    return util.load_sweep_config(sweep_fname)
+
+
+def find_the_best_params(model_name, args):
+
+    logger.info(f"Starting the search for the best hyperparams for the model: {model_name}")
+
+    sweep_config = load_sweep_config(model_name, args)
+    sweep_runner = hyperparameter_sweep.start_sweep(sweep_config)
+
+    # wait for all runs to finish
+    all_finished = sweep_runner.all_runs_finished()
+    while not all_finished:
+        logger.info("Waiting for sweep runs to finish...")
+        # Sleep for a few seconds before checking again
+        time.sleep(30)
+        all_finished = sweep_runner.all_runs_finished()
+
+    logger.info(f"Finished the sweep: {sweep_runner.sweep_id}")
+    return sweep_runner.sweep_id
+
+
+def generate_predictions(sweep_id, model_name):
+    experiment_name = f"{dataset_name}_{model_name}"
+    eval.main(weights=experiment_name,
+              targets=targets_path,
+              sets='dev,test',
+              batchsize=128,
+              num_workers=4,
+              # sampling_rate=sampling_rate,
+              sweep_id=sweep_id
+              )
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--phasenet_config", type=str, required=False)
+    parser.add_argument("--gpd_config", type=str, required=False)
+    args = parser.parse_args()
+
+    # generate evaluation targets
+    generate_eval_targets.main(data_path, targets_path, "2,3", sampling_rate, None)
+
+    # find the best hyperparams for the models
+    for model_name in ["GPD", "PhaseNet"]:
+        sweep_id = find_the_best_params(model_name, args)
+        generate_predictions(sweep_id, model_name)
+
+    # collect results
+    collect_results.traverse_path("pred", "pred/results.csv")
+
+
+if __name__ == "__main__":
+    main()
+
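The sweep YAML files referenced here (e.g. `experiments/sweep_gpd.yaml`, listed in `config.json`) follow the standard W&B sweep schema; their exact contents are not reproduced in this section. A plausible minimal shape of the dict that `util.load_sweep_config` returns and `hyperparameter_sweep.start_sweep` consumes:

```python
# Illustrative only -- the real files live in experiments/ (see config.json).
sweep_config = {
    "name": "GPD_sweep",
    "method": "bayes",  # W&B supports grid, random and bayes search
    "metric": {"name": "val_loss", "goal": "minimize"},
    "parameters": {
        "model_name": {"values": ["GPD"]},
        "learning_rate": {"values": [1e-2, 1e-3, 1e-4]},
        "batch_size": {"values": [256]},
    },
}
```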
+""" + import seisbench.generate as sbg -import seisbench.models as sbm from seisbench.util import worker_seeding -import numpy as np -import torch -from torch.utils.data import DataLoader -import torch.nn.functional as f -import torch.nn as nn -from torchmetrics import Metric -from torch import Tensor, tensor + +import pytorch_lightning as pl +# from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger +from pytorch_lightning.loggers import WandbLogger, CSVLogger + +# https://github.com/Lightning-AI/lightning/pull/12554 +# https://github.com/Lightning-AI/lightning/issues/11796 +from pytorch_lightning.callbacks import ModelCheckpoint +import argparse import json -from dotenv import load_dotenv +import numpy as np +from torch.utils.data import DataLoader +import torch +import os +import logging +from pathlib import Path -load_dotenv() -wandb_api_key = os.environ.get('WANDB_API_KEY') -if wandb_api_key is None: - raise ValueError("WANDB_API_KEY environment variable is not set.") +import models, data, util +import time +import datetime +import wandb +# +# load_dotenv() +# wandb_api_key = os.environ.get('WANDB_API_KEY') +# if wandb_api_key is None: +# raise ValueError("WANDB_API_KEY environment variable is not set.") +# +# wandb.login(key=wandb_api_key) -wandb.login(key=wandb_api_key) +def train(config, experiment_name, test_run): + """ + Runs the model training defined by the config. -project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + Config parameters: + + - model: Model used as in the models.py file, but without the Lit suffix + - data: Dataset used, as in seisbench.data + - model_args: Arguments passed to the constructor of the model lightning module + - trainer_args: Arguments passed to the lightning trainer + - batch_size: Batch size for training and validation + - num_workers: Number of workers for data loading. + If not set, uses environment variable BENCHMARK_DEFAULT_WORKERS + - restrict_to_phase: Filters datasets only to examples containing the given phase. + By default, uses all phases. + - training_fraction: Fraction of training blocks to use as float between 0 and 1. Defaults to 1. + + :param config: Configuration parameters for training + :param test_run: If true, makes a test run with less data and less logging. Intended for debug purposes. 
+
+def prepare_data(config, model, test_run):
+    """
+    Returns the training and validation data loaders.
+
+    :param config: Configuration parameters, as in train()
+    :param model: Model lightning module, queried for its augmentations
+    :param test_run: If true, only a small part of the dataset is used
+    :return: Tuple of (train_loader, dev_loader)
+    """
+    batch_size = config.get("batch_size", 1024)
+    if isinstance(batch_size, list):
+        batch_size = batch_size[0]
 
-    def __init__(self, sampling_rate):
-        super().__init__()
-        self.add_state("mae_error", default=torch.tensor(0), dist_reduce_fx="sum")
-        self.sampling_rate = sampling_rate
+    num_workers = config.get("num_workers", util.default_workers)
+    try:
+        dataset = data.get_dataset_by_name(config["dataset_name"])(
+            sampling_rate=100, component_order="ZNE", dimension_order="NCW", cache="full"
+        )
+    except ValueError:
+        data_path = str(Path.cwd().parent) + '/' + config['data_path']
+        logging.debug(data_path)
+        dataset = data.get_custom_dataset(data_path)
 
-    def update(self, preds: torch.Tensor, target: torch.Tensor):
+    restrict_to_phase = config.get("restrict_to_phase", None)
+    if restrict_to_phase is not None:
+        mask = generate_phase_mask(dataset, restrict_to_phase)
+        dataset.filter(mask, inplace=True)
 
-        assert preds.shape == target.shape
+    if "split" not in dataset.metadata.columns:
+        logging.warning("No split defined, adding auxiliary split.")
+        split = np.array(["train"] * len(dataset))
+        split[int(0.6 * len(dataset)) : int(0.7 * len(dataset))] = "dev"
+        split[int(0.7 * len(dataset)) :] = "test"
 
-        pred_pick_idx = torch.argmax(preds[:, 0, :], dim=1).type(torch.FloatTensor)
-        true_pick_idx = torch.argmax(target[:, 0, :], dim=-1).type(torch.FloatTensor)
+        dataset._metadata["split"] = split
 
-        mae = nn.L1Loss()
-        self.mae_error = mae(pred_pick_idx, true_pick_idx) / self.sampling_rate #mae in seconds
+    train_data = dataset.train()
+    dev_data = dataset.dev()
 
-    def compute(self):
-        return self.mae_error.float()
+    if test_run:
+        # Only use a small part of the dataset
+        train_mask = np.zeros(len(train_data), dtype=bool)
+        train_mask[:5000] = True
+        train_data.filter(train_mask, inplace=True)
+
+        dev_mask = np.zeros(len(dev_data), dtype=bool)
+        dev_mask[:5000] = True
+        dev_data.filter(dev_mask, inplace=True)
+
+    training_fraction = config.get("training_fraction", 1.0)
+    apply_training_fraction(training_fraction, train_data)
+
+    
train_data.preload_waveforms(pbar=True) + dev_data.preload_waveforms(pbar=True) + + train_generator = sbg.GenericGenerator(train_data) + dev_generator = sbg.GenericGenerator(dev_data) + + train_generator.add_augmentations(model.get_train_augmentations()) + dev_generator.add_augmentations(model.get_val_augmentations()) + + train_loader = DataLoader( + train_generator, + batch_size=batch_size, + shuffle=True, + num_workers=num_workers, + worker_init_fn=worker_seeding, + drop_last=True, # Avoid crashes from batch norm layers for batch size 1 + ) + dev_loader = DataLoader( + dev_generator, + batch_size=batch_size, + num_workers=num_workers, + worker_init_fn=worker_seeding, + ) + + return train_loader, dev_loader -class EarlyStopper: - def __init__(self, patience=1, min_delta=0): - self.patience = patience - self.min_delta = min_delta - self.counter = 0 - self.min_validation_loss = np.inf +def apply_training_fraction(training_fraction, train_data): + """ + Reduces the size of train_data to train_fraction by inplace filtering. + Filter blockwise for efficient memory savings. - def early_stop(self, validation_loss): - if validation_loss < self.min_validation_loss: - self.min_validation_loss = validation_loss - self.counter = 0 - elif validation_loss > (self.min_validation_loss + self.min_delta): - self.counter += 1 - if self.counter >= self.patience: - return True - return False + :param training_fraction: Training fraction between 0 and 1. + :param train_data: Training dataset + :return: None + """ + + if not 0.0 < training_fraction <= 1.0: + raise ValueError("Training fraction needs to be between 0 and 1.") + + if training_fraction < 1: + blocks = train_data["trace_name"].apply(lambda x: x.split("$")[0]) + unique_blocks = blocks.unique() + np.random.shuffle(unique_blocks) + target_blocks = unique_blocks[: int(training_fraction * len(unique_blocks))] + target_blocks = set(target_blocks) + mask = blocks.isin(target_blocks) + train_data.filter(mask, inplace=True) -def get_data_generator(split, sampling_rate, path, sb_dataset="ethz", station=None, window='random'): +def generate_phase_mask(dataset, phases): + mask = np.zeros(len(dataset), dtype=bool) - if path is not None: - data = sbd.WaveformDataset(path, sampling_rate=sampling_rate) - phase_dict = { - "trace_Pg_arrival_sample": "P" - } - elif sb_dataset == "ethz": - data = sbd.ETHZ(sampling_rate=sampling_rate, force=True) - - phase_dict = { - "trace_p_arrival_sample": "P", - "trace_pP_arrival_sample": "P", - "trace_P_arrival_sample": "P", - "trace_P1_arrival_sample": "P", - "trace_Pg_arrival_sample": "P", - "trace_Pn_arrival_sample": "P", - "trace_PmP_arrival_sample": "P", - "trace_pwP_arrival_sample": "P", - "trace_pwPm_arrival_sample": "P", - # "trace_s_arrival_sample": "S", - # "trace_S_arrival_sample": "S", - # "trace_S1_arrival_sample": "S", - # "trace_Sg_arrival_sample": "S", - # "trace_SmS_arrival_sample": "S", - # "trace_Sn_arrival_sample": "S", - } - - dataset = data.get_split(split) - dataset.filter(dataset.metadata.trace_Pg_arrival_sample.notna()) - - print(split, dataset.metadata.shape, sampling_rate) - - if station is not None: - dataset.filter(dataset.metadata.station_code==station) - - data_generator = sbg.GenericGenerator(dataset) - if window == 'random': - print("using random window") - window_selector = sbg.RandomWindow(windowlen=3001, strategy="pad") - else: - window_selector = sbg.FixedWindow(windowlen=3001, p0=0, strategy="pad") - - augmentations = [ - sbg.WindowAroundSample(list(phase_dict.keys()), samples_before=3000, 
windowlen=6000, selection="random", - strategy="variable"), - window_selector, - sbg.Normalize(demean_axis=-1, amp_norm_axis=-1, amp_norm_type="peak"), - sbg.ChangeDtype(np.float32), - sbg.ProbabilisticLabeller(label_columns=phase_dict, sigma=30, dim=0) - ] - - data_generator.add_augmentations(augmentations) - - return data_generator - - -def get_data_generators(sampling_rate=100, path=project_path+"/data/igf/seisbench_format", sb_dataset="ethz", station=None, - window='random'): - - train_generator = get_data_generator("train", sampling_rate, path, sb_dataset, station, window) - dev_generator = get_data_generator("dev", sampling_rate, path, sb_dataset, station, window) - test_generator = get_data_generator("test", sampling_rate, path, sb_dataset, station, window) - - return train_generator, dev_generator, test_generator - - -def get_data_loaders(batch_size=256, sampling_rate=100, path=project_path+"/data/igf/seisbench_format", sb_dataset="ethz", - window='random'): - - train_generator, dev_generator, test_generator = get_data_generators(sampling_rate, path, sb_dataset, window=window) - num_workers = 0 # The number of threads used for loading data - - train_loader = DataLoader(train_generator, batch_size=batch_size, shuffle=True, num_workers=num_workers, - worker_init_fn=worker_seeding) - dev_loader = DataLoader(dev_generator, batch_size=batch_size, shuffle=False, num_workers=num_workers, - worker_init_fn=worker_seeding) - - test_loader = DataLoader(test_generator, batch_size=batch_size, shuffle=False, num_workers=num_workers, - worker_init_fn=worker_seeding) - - return train_loader, dev_loader, test_loader - - -def load_model(name="PhaseNet", pretrained=None, classes=2, modify_output=True): - - if name == "PhaseNet": - - if pretrained is not None and pretrained: - model = sbm.PhaseNet(phases="PN", norm="peak").from_pretrained(pretrained) + for key, phase in models.phase_dict.items(): + if phase not in phases: + continue else: - model = sbm.PhaseNet(phases="PN", norm="peak") + if key in dataset.metadata: + mask = np.logical_or(mask, ~np.isnan(dataset.metadata[key])) - if modify_output: - model.out = nn.Conv1d(model.filters_root, classes, 1, padding="same") - - return model - - -def train_one_epoch(model, dataloader, optimizer, pick_mae): - size = len(dataloader.dataset) - for batch_id, batch in enumerate(dataloader): - - # Compute prediction and loss - - pred = model(batch["X"].to(model.device)) - - loss = loss_fn(pred, batch["y"].to(model.device)) - - # Compute cross entropy loss - cross_entropy_loss = f.cross_entropy(pred, batch["y"]) - - # Compute mae - mae = pick_mae(pred, batch['y']) - - wandb.log({"loss": loss}) - wandb.log({"batch cross entropy loss": cross_entropy_loss}) - wandb.log({"p_mae": mae}) - - - # Backpropagation - optimizer.zero_grad() - loss.backward() - optimizer.step() - - if batch_id % 5 == 0: - loss, current = loss.item(), batch_id * batch["X"].shape[0] - print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]") - print(f"mae: {mae:>7f}") - - -def test_one_epoch(model, dataloader, pick_mae, wandb_log=True): - - num_batches = len(dataloader) - test_loss = 0 - test_mae = 0 - - with torch.no_grad(): - for batch in dataloader: - pred = model(batch["X"].to(model.device)) - - test_loss += loss_fn(pred, batch["y"].to(model.device)).item() - test_mae += pick_mae(pred, batch['y']) - test_cross_entropy_loss = f.cross_entropy(pred, batch["y"]) - if wandb_log: - wandb.log({"batch cross entropy test loss": test_cross_entropy_loss}) - - test_loss /= num_batches - test_mae /= 
num_batches - - wandb.log({"test_p_mae": test_mae, "test_loss": test_loss}) - - print(f"Test avg loss: {test_loss:>8f}") - print(f"Test avg mae: {test_mae:>7f}\n") - - return test_loss, test_mae - - -def train_model(model, path_to_trained_model, train_loader, dev_loader): - - wandb.watch(model, log_freq=10) - - optimizer = torch.optim.Adam(model.parameters(), lr=wandb.config.learning_rate) - early_stopper = EarlyStopper(patience=3, min_delta=10) - pick_mae = PickMAE(wandb.config.sampling_rate) - - best_loss = np.inf - best_metrics = {} - - for t in range(wandb.config.epochs): - print(f"Epoch {t + 1}\n-------------------------------") - train_one_epoch(model, train_loader, optimizer, pick_mae) - test_loss, test_mae = test_one_epoch(model, dev_loader, pick_mae) - - if test_loss < best_loss: - best_loss = test_loss - best_metrics = {"test_p_mae": test_mae, "test_loss": test_loss} - torch.save(model.state_dict(), path_to_trained_model) - - if early_stopper.early_stop(test_loss): - break - - print("Best model: ", str(best_metrics)) - - -def loss_fn(y_pred, y_true, eps=1e-5): - # vector cross entropy loss - h = y_true * torch.log(y_pred + eps) - h = h.mean(-1).sum(-1) # Mean along sample dimension and sum along pick dimension - h = h.mean() # Mean over batch axis - return -h - - -def train_phasenet_on_sb_data(): - - config = { - "epochs": 3, - "batch_size": 256, - "dataset": "ethz", - "sampling_rate": 100, - "model_name": "PhaseNet" - } - - run = wandb.init( - # set the wandb project where this run will be logged - project="training_seisbench_models_on_igf_data", - # track hyperparameters and run metadata - config=config - ) - - wandb.run.log_code(".", include_fn=lambda path: path.endswith("training_wandb_sweep.py")) - - train_loader, dev_loader, test = get_data_loaders(batch_size=wandb.config.batch_size, - sampling_rate=wandb.config.sampling_rate, - path=None, - sb_dataset=wandb.config.dataset) - - model = load_model(name=wandb.config.model_name, pretrained=None, modify_output=True) - path_to_trained_model = f"{project_path}/models/{wandb.config.model_name}_trained_on_{wandb.config.data_set}.pt" - train_model(model, path_to_trained_model, - train_loader, dev_loader) - - artifact = wandb.Artifact('model', type='model') - artifact.add_file(path_to_trained_model) - run.log_artifact(artifact) - - run.finish() - - -def load_config(config_path): - with open(config_path, 'r') as f: - config = json.load(f) - return config - - -def train_sbmodel_on_igf_data(): - - config_path = project_path + "/experiments/config.json" - config = load_config(config_path) - - run = wandb.init( - # set the wandb project where this run will be logged - project="training_seisbench_models_on_igf_data", - # track hyperparameters and run metadata - config=config - ) - wandb.run.log_code(".", include_fn=lambda path: path.endswith("training_wandb_sweep.py")) - - print(wandb.config.batch_size, wandb.config.sampling_rate) - train_loader, dev_loader, test_loader = get_data_loaders(batch_size=wandb.config.batch_size, - sampling_rate=wandb.config.sampling_rate - ) - - model_name = wandb.config.model_name - pretrained = wandb.config.pretrained - - print(model_name, pretrained) - model = load_model(name=model_name, pretrained=pretrained) - path_to_trained_model = f"{project_path}/models/{model_name}_pretrained_on_{pretrained}_finetuned_on_{wandb.config.dataset}.pt" - train_model(model, path_to_trained_model, train_loader, dev_loader) - - artifact = wandb.Artifact('model', type='model') - artifact.add_file(path_to_trained_model) - 
run.log_artifact(artifact) - - run.finish() + return mask if __name__ == "__main__": - # train_phasenet_on_sb_data() - train_sbmodel_on_igf_data() + code_start_time = time.perf_counter() + torch.manual_seed(42) + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, required=True) + parser.add_argument("--test_run", action="store_true") + parser.add_argument("--lr", default=None, type=float) + args = parser.parse_args() + + with open(args.config, "r") as f: + config = json.load(f) + + experiment_name = os.path.basename(args.config)[:-5] + if args.lr is not None: + logging.warning(f"Overwriting learning rate to {args.lr}") + experiment_name += f"_{args.lr}" + config["model_args"]["lr"] = args.lr + + run = wandb.init( + # set the wandb project where this run will be logged + project="training_seisbench_models_on_igf_data_with_pick-benchmark", + # track hyperparameters and run metadata + config=config + ) + + if args.test_run: + experiment_name = experiment_name + "_test" + train(config, experiment_name, test_run=args.test_run) + + running_time = str( + datetime.timedelta(seconds=time.perf_counter() - code_start_time) + ) + print(f"Running time: {running_time}") diff --git a/scripts/training_wandb_sweep.py b/scripts/training_wandb_sweep.py deleted file mode 100644 index bbbe162..0000000 --- a/scripts/training_wandb_sweep.py +++ /dev/null @@ -1,62 +0,0 @@ -import os.path -import wandb -import yaml - -from train import get_data_loaders, load_model, train_model - -from dotenv import load_dotenv - -load_dotenv() -wandb_api_key = os.environ.get('WANDB_API_KEY') -if wandb_api_key is None: - raise ValueError("WANDB_API_KEY environment variable is not set.") - -wandb.login(key=wandb_api_key) - -project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sweep_config_path = project_path + "/experiments/sweep4.yaml" - -with open(sweep_config_path) as file: - sweep_configuration = yaml.load(file, Loader=yaml.FullLoader) - -sweep_id = wandb.sweep( - sweep=sweep_configuration, - project='training_seisbench_models_on_igf_data' - ) -sampling_rate = 100 - -def tune_training_hyperparams(): - - run = wandb.init( - # set the wandb project where this run will be logged - project="training_seisbench_models_on_igf_data", - # track hyperparameters and run metadata - config={"sampling_rate":sampling_rate} - ) - - wandb.run.log_code(".", include_fn=lambda path: path.endswith("training_wandb_sweep.py")) - - train_loader, dev_loader, test_loader = get_data_loaders(batch_size=wandb.config.batch_size, - sampling_rate=wandb.config.sampling_rate, - sb_dataset=wandb.config.dataset) - - model_name = wandb.config.model_name - pretrained = wandb.config.pretrained - print(wandb.config) - print(model_name, pretrained, type(pretrained), wandb.config.sampling_rate) - if not pretrained: - pretrained - model = load_model(name=model_name, pretrained=pretrained) - path_to_trained_model = f"{project_path}/models/{model_name}_pretrained_on_{pretrained}_finetuned_on_{wandb.config.dataset}.pt" - train_model(model, path_to_trained_model, train_loader, dev_loader) - - artifact = wandb.Artifact('model', type='model') - artifact.add_file(path_to_trained_model) - run.log_artifact(artifact) - - run.finish() - - -if __name__ == "__main__": - - wandb.agent(sweep_id, function=tune_training_hyperparams, count=10) diff --git a/scripts/util.py b/scripts/util.py new file mode 100644 index 0000000..697bff8 --- /dev/null +++ b/scripts/util.py @@ -0,0 +1,119 @@ +""" +This script offers general functionality 
required in multiple places.
+"""
+
+import numpy as np
+import pandas as pd
+import os
+import logging
+import glob
+import wandb
+from dotenv import load_dotenv
+import sys
+from config_loader import models_path, configs_path
+import yaml
+
+load_dotenv()
+
+
+logging.basicConfig()
+logging.getLogger().setLevel(logging.DEBUG)
+
+
+def load_best_model_data(sweep_id, weights):
+    """
+    Determines the model with the lowest validation loss.
+    If sweep_id is not provided, the best model is determined from the validation losses
+    encoded in the checkpoint filenames in the weights directory.
+
+    :param sweep_id: W&B sweep id; if given, the best run is taken from the sweep
+    :param weights: Name of the weights directory of the experiment
+    :return: Tuple of (best_checkpoint_path, run_id)
+    """
+
+    if sweep_id is not None:
+        wandb_project_name = os.environ.get("WANDB_PROJECT")
+        wandb_user = os.environ.get("WANDB_USER")
+        api = wandb.Api()
+        sweep = api.sweep(f"{wandb_user}/{wandb_project_name}/{sweep_id}")
+
+        # Get best run parameters
+        best_run = sweep.best_run()
+        run_id = best_run.id
+        matching_models = glob.glob(f"{models_path}/{weights}/*run={run_id}*ckpt")
+        if len(matching_models) != 1:
+            raise ValueError("Unable to determine the best checkpoint for run_id: " + run_id)
+        best_checkpoint_path = matching_models[0]
+
+    else:
+        checkpoints_path = f"{models_path}/{weights}/*ckpt"
+        logging.debug(f"Searching for checkpoints in dir: {checkpoints_path}")
+
+        checkpoints = glob.glob(checkpoints_path)
+        val_losses = []
+
+        for ckpt in checkpoints:
+            i = ckpt.index("val_loss=")
+            val_losses.append(float(ckpt[i + 9:-5]))
+
+        best_checkpoint_path = checkpoints[np.argmin(val_losses)]
+        run_id_st = best_checkpoint_path.index("run=") + 4
+        run_id_end = best_checkpoint_path.index("-epoch=")
+        run_id = best_checkpoint_path[run_id_st:run_id_end]
+
+    return best_checkpoint_path, run_id
+
+
+def load_best_model(model_cls, weights, version):
+    """
+    Determines the model with the lowest validation loss from the csv logs and loads it
+
+    :param model_cls: Class of the lightning module to load
+    :param weights: Path to weights as in cmd arguments
+    :param version: String of version file
+    :return: Instance of lightning module that was loaded from the best checkpoint
+    """
+    metrics = pd.read_csv(weights / version / "metrics.csv")
+
+    idx = np.nanargmin(metrics["val_loss"])
+    min_row = metrics.iloc[idx]
+
+    # For default checkpoint filename, see https://github.com/Lightning-AI/lightning/pull/11805
+    # and https://github.com/Lightning-AI/lightning/issues/16636.
+    # For example, 'epoch=0-step=1.ckpt' means the 1st step has finished, but the 1st epoch hasn't
+    checkpoint = f"epoch={min_row['epoch']:.0f}-step={min_row['step']+1:.0f}.ckpt"
+
+    # For default save path of checkpoints, see https://github.com/Lightning-AI/lightning/pull/12372
+    checkpoint_path = weights / version / "checkpoints" / checkpoint
+
+    return model_cls.load_from_checkpoint(checkpoint_path)
+
+
+default_workers = os.getenv("BENCHMARK_DEFAULT_WORKERS", None)
+if default_workers is None:
+    logging.warning(
+        "BENCHMARK_DEFAULT_WORKERS not set. "
+        "Will use 12 workers if not specified otherwise in configuration. "
+ ) + default_workers = 12 +else: + default_workers = int(default_workers) + + +def load_sweep_config(sweep_fname): + """ + Loads sweep config from yaml file + + :param sweep_fname: sweep yaml file, expected to be in configs_path + :return: Dictionary containing sweep config + """ + + sweep_config_path = f"{configs_path}/{sweep_fname}" + + try: + with open(sweep_config_path, "r") as file: + sweep_config = yaml.load(file, Loader=yaml.FullLoader) + except FileNotFoundError: + logging.error(f"Could not find sweep config file: {sweep_fname}. " + f"Please make sure the file exists and is in {configs_path} directory.") + sys.exit(1) + + return sweep_config
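The fallback branch of `load_best_model_data` relies on checkpoint filenames that embed the run id and validation loss. The exact template is produced by the checkpoint callback configured for sweep runs (not shown in this section), so the name below is hypothetical; the string slicing, however, matches the code above:

```python
# Hypothetical checkpoint filename; the real template comes from the
# ModelCheckpoint callback used for sweep runs.
ckpt = "weights/igf_GPD/run=abc123-epoch=7-step=1520-val_loss=0.0421.ckpt"

i = ckpt.index("val_loss=")
val_loss = float(ckpt[i + 9:-5])      # strip "val_loss=" and ".ckpt"

run_id_st = ckpt.index("run=") + 4
run_id_end = ckpt.index("-epoch=")
run_id = ckpt[run_id_st:run_id_end]

assert (run_id, val_loss) == ("abc123", 0.0421)
```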