From b3c2866ece9d27f87b93074662eb494054f97aa9 Mon Sep 17 00:00:00 2001 From: marcorudolphflex Date: Tue, 2 Dec 2025 13:10:35 +0100 Subject: [PATCH] perf(tidy3d): FXC-3721 Speed up test suite --- .gitignore | 3 +- docs/development/usage.rst | 4 +- poetry.lock | 43 ++- pyproject.toml | 6 +- scripts/profile_pytest.py | 302 ++++++++++++++++++ .../test_components/autograd/test_autograd.py | 133 ++++---- tests/test_components/test_IO.py | 6 +- tests/test_components/test_custom.py | 4 + tests/test_components/test_eme.py | 2 + tests/test_components/test_scene.py | 9 +- tests/test_components/test_simulation.py | 19 +- .../test_package/test_parametric_variants.py | 3 +- tests/test_plugins/test_design.py | 9 +- tests/test_plugins/test_invdes.py | 1 + tests/test_web/test_webapi.py | 10 +- 15 files changed, 438 insertions(+), 116 deletions(-) create mode 100755 scripts/profile_pytest.py diff --git a/.gitignore b/.gitignore index 1e972cd884..3a581e2a3d 100644 --- a/.gitignore +++ b/.gitignore @@ -135,5 +135,6 @@ htmlcov/ .idea .vscode -# cProfile output +# profile outputs *.prof +pytest_profile_stats.txt diff --git a/docs/development/usage.rst b/docs/development/usage.rst index 37a706fc98..2edcea4dfb 100644 --- a/docs/development/usage.rst +++ b/docs/development/usage.rst @@ -67,6 +67,9 @@ There are a range of handy development functions that you might want to use to s * - Running ``pytest`` commands inside the ``poetry`` environment. - Make sure you have already installed ``tidy3d`` in ``poetry`` and you are in the root directory. - ``poetry run pytest`` + * - Analyze slow ``pytest`` runs with durations / cProfile / debug subset helpers. + - Use ``--debug`` to run only the first N collected tests or ``--profile`` to capture call stacks. + - ``python scripts/profile_pytest.py [options]`` * - Run ``coverage`` testing from the ``poetry`` environment. - - ``poetry run coverage run -m pytest`` @@ -84,4 +87,3 @@ There are a range of handy development functions that you might want to use to s - ``poetry run tidy3d develop replace-in-files`` - diff --git a/poetry.lock b/poetry.lock index a992103f19..5e3c4e8be2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. 
[[package]] name = "absl-py" @@ -1672,61 +1672,40 @@ markers = "extra == \"dev\" or extra == \"docs\" or extra == \"gdstk\"" files = [ {file = "gdstk-0.9.61-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8db8120b5b8864de074ed773d4c0788100b76eecd2bf327a6de338f011745e3f"}, {file = "gdstk-0.9.61-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ad942da613f6274e391371771b8cfef2854eb69f628914f716f518929567dcd4"}, - {file = "gdstk-0.9.61-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3b49ff5e43764783d2053b129fe1eac152910e2d062dfc2fd2408c9b91a043d5"}, {file = "gdstk-0.9.61-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5e53c0f765796b4fc449b72c800924df2e936820087816686e987962b3f0452a"}, {file = "gdstk-0.9.61-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc601258b850595b34e22b5c0fd1d98724a053faa4b1a23517c693b6eb01e275"}, - {file = "gdstk-0.9.61-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8ab2644f04a3d91e158bfce7c5dbdc60f09745cf7dc7fc19e9255cb6e6d9547b"}, {file = "gdstk-0.9.61-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:4aa897a629d20bca211cacf36e35a7316a5d6cfe03effb6af19c0eb7fd225421"}, {file = "gdstk-0.9.61-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9d20a09f06596ff2926e6b4ad12f3b0ae0ce545bf60211b96c2f9791f1df37fe"}, {file = "gdstk-0.9.61-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:688cc52aa1a5b9016eb0787a9cea4943a1aa2cc3d8d3cbeeaa44b3203f71e38f"}, {file = "gdstk-0.9.61-cp310-cp310-win_amd64.whl", hash = "sha256:5214c4f89fb9ff60ced79f6d2d28de4c5d5b588c9ef930fe72333edaa5e0bcf2"}, {file = "gdstk-0.9.61-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5fab80fa1e5ac4d956a04fdc78fb6971cb32a43418553939ee4ccf4eba6d4496"}, {file = "gdstk-0.9.61-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:82706a72f37c70340978fb70777cf94119408593f5a8c73c0700c0b84486a3fe"}, - {file = "gdstk-0.9.61-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6857359fc517fa91d6c0cd179cf09290aaebf538869d825585d9a0ed3cec754d"}, {file = "gdstk-0.9.61-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:459b1f28a6283bb61ed28c745aba3d49c5cbd9424fb81f76023d3f44b92c6257"}, {file = "gdstk-0.9.61-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3812aadf36764bb6ca86f1b9f4bdf8f8c41749bcdf1e3b45d6263e48b4f97eab"}, - {file = "gdstk-0.9.61-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f3c6f0df208263039851ac5d3d94fcddbc80029a69918d53c0b7dc392725d8fb"}, {file = "gdstk-0.9.61-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:7e166ef1c26fc0f48fa8194e54683e61ca43b72d3342708d4229855dcad137ed"}, {file = "gdstk-0.9.61-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:79dc9f0f0c5f6860199c9af09564bbfed4c34885d3f5b46ab9514ab0716cff39"}, {file = "gdstk-0.9.61-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4b3e2b367e5962db05845eaaf3f9d8bcfa3914738c6e174455a152a63d78904c"}, {file = "gdstk-0.9.61-cp311-cp311-win_amd64.whl", hash = "sha256:0c3866dc287d657f78ae587e2e10de2747ebbf5d2824dc6ba4f9ece89c36a35a"}, {file = "gdstk-0.9.61-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:61f0ee05cdce9b4163ea812cbf2e2f5d8d01a293fa118ff98348280306bd91d6"}, {file = "gdstk-0.9.61-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fff1b104b6775e4c27ab2751b3f4ac6c1ce86a4e9afd5e5535ac4acefa6a7a07"}, - {file = "gdstk-0.9.61-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:5218f8c5ab13b6e979665c0a7dc1272768003a1cb7add0682483837f7485faed"}, {file = "gdstk-0.9.61-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e79f3881d3b3666a600efd5b2c131454507f69d3c9b9eaf383d106cfbd6e7bc"}, {file = "gdstk-0.9.61-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e90a6e24c2145320e53e953a59c6297fd25c17c6ef098fa8602e64e64a5390ea"}, - {file = "gdstk-0.9.61-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3a49401cbd26c5a17a4152d1befa73efb21af694524557bf09d15f4c8a874e6"}, {file = "gdstk-0.9.61-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:8738ac63bbe29dcb5abae6a19d207c4e0857f9dc1bd405c85af8a87f0dcfb348"}, {file = "gdstk-0.9.61-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:23bb023a49f3321673d0e32cdce2e2705a51d9e12328c928723ded49af970520"}, {file = "gdstk-0.9.61-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:81c2f19cab89623d1f56848e7a16e2fab82a93c61c8f7aa73f5ff59840b60c0f"}, {file = "gdstk-0.9.61-cp312-cp312-win_amd64.whl", hash = "sha256:4474f015ecc228b210165287cb7eea65639ea6308f60105cb49e970079bddc2b"}, {file = "gdstk-0.9.61-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3beeae846fc523c7e3a01c47edcd3b7dd83c29650e56b82a371e528f9cb0ec3e"}, {file = "gdstk-0.9.61-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:575a21639b31e2fab4d9e918468b8b40a58183028db563e5963be594bff1403d"}, - {file = "gdstk-0.9.61-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:90d54b48223dcbb8257769faaa87542d12a749d8486e8d1187a45d06e9422859"}, {file = "gdstk-0.9.61-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35405bed95542a0b10f343b165ce0ad80740bf8127a4507565ec74222e6ec8d3"}, {file = "gdstk-0.9.61-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b311ddf8982995b52ac3bf3b32a6cf6d918afc4e66dea527d531e8af73896231"}, - {file = "gdstk-0.9.61-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6dcbfc60fba92d10f1c7d612b5409c343fcaf2a380640e9fb01c504ca948b412"}, {file = "gdstk-0.9.61-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:fab67ccdd8029ef7eb873f8c98f875dc2665a5e45af7cf3d2a7a0f401826a1d3"}, {file = "gdstk-0.9.61-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5852749e203d6978e06d02f8ef9e29ce4512cb1aedeb62c37b8e8b2c10c4f529"}, {file = "gdstk-0.9.61-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ee38a54c799e77dbe219266f765bbd3b2906b62bc7b6fb64b1387e6db3dd187"}, {file = "gdstk-0.9.61-cp313-cp313-win_amd64.whl", hash = "sha256:6abb396873b2660dd7863d664b3822f00547bf7f216af27be9f1f812bc5e8027"}, - {file = "gdstk-0.9.61-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:a674af8be5cf1f8ea9f6c5b5f165f797d7e2ed74cbca68b4a22adb92b515fb35"}, - {file = "gdstk-0.9.61-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:38ec0b7285d6c9bf8cbc279731dc0d314633cda2ce9e6f9053554b3e5f004fcd"}, - {file = "gdstk-0.9.61-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3b63a77b57fb441c8017217aaf1e8b13d93cbee822031a8e2826adb716e01dd4"}, - {file = "gdstk-0.9.61-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7fae6eee627e837d1405b47d381ccd33dbba85473b1bb3822bdc8ae41dbc0dc"}, - {file = "gdstk-0.9.61-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9e396694cac24bd87d0e38c37e6740d9ba0c13f6c9f2211a871d62288430f069"}, - {file = "gdstk-0.9.61-cp314-cp314-win_amd64.whl", hash = 
"sha256:7ea0c1200dc53b794e9c0cc6fe3ea51e49113dfdd9c3109e1961cda3cc2197c7"}, - {file = "gdstk-0.9.61-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:616dd1c3e7aea4a98aeb03db7cf76a853d134c54690790eaa25c63eede7b869a"}, - {file = "gdstk-0.9.61-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b0e898202fbb7fd4c39f8404831415a0aa0445656342102c4e77d4a7c2c15a1d"}, - {file = "gdstk-0.9.61-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:29bb862a1a814f5bbd6f8bbc2f99e1163df9e6307071cb6e11251dbe7542feb5"}, - {file = "gdstk-0.9.61-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c6c2a08d82a683aff50dc63f2943ed805d32d46bd984cbd4ac9cf876146d0ef9"}, - {file = "gdstk-0.9.61-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3ba52f95763052a6968583942e6531ceca20c14c762d44fe2bd887445e2f73b6"}, {file = "gdstk-0.9.61-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1033d4ddd2af34461c1133ef62213a4861f23d07d64d66e92fe8d2554a85ba6d"}, {file = "gdstk-0.9.61-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bad94f74dff3efaa5ade7bab5040464e575839fa65b935c8f872a47e1658f535"}, - {file = "gdstk-0.9.61-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c9c8738b57cb6100cb0d4346272af489d05f9b9908e0018a5ecbcb5ee485fa97"}, {file = "gdstk-0.9.61-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7f9dd95da53d3cdbc3dcaed446b7404d8d4dfbdbd68628eeddde6285bc5a5"}, {file = "gdstk-0.9.61-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f73637dc2abe3754906f2911557b563281f868f5d153332edea681d963b2a22"}, - {file = "gdstk-0.9.61-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:56d493bb7fc3fb33de63d8c1107ff3d645b62596d0c2073f1a390d90bef73233"}, {file = "gdstk-0.9.61-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7905572cc10b2b85a960317eadb5cf95197b5a52b1ef9358336d5cd224e08314"}, {file = "gdstk-0.9.61-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a4bc70f308653d63c26d15637b27e2435f7bdaa50d072db410c1f573db6d985b"}, {file = "gdstk-0.9.61-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3794115a278d5a38db5c5a8f0cfeff3e1701263bcfb58b7e1934e199578e14f1"}, @@ -5229,6 +5208,22 @@ tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""} [package.extras] testing = ["covdefaults (>=2.3)", "coverage (>=7.10.7)", "pytest-mock (>=3.15.1)"] +[[package]] +name = "pytest-order" +version = "1.3.0" +description = "pytest plugin to run your tests in a specific order" +optional = true +python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"dev\"" +files = [ + {file = "pytest_order-1.3.0-py3-none-any.whl", hash = "sha256:2cd562a21380345dd8d5774aa5fd38b7849b6ee7397ca5f6999bbe6e89f07f6e"}, + {file = "pytest_order-1.3.0.tar.gz", hash = "sha256:51608fec3d3ee9c0adaea94daa124a5c4c1d2bb99b00269f098f414307f23dde"}, +] + +[package.dependencies] +pytest = {version = ">=6.2.4", markers = "python_version >= \"3.10\""} + [[package]] name = "pytest-timeout" version = "2.4.0" @@ -7746,7 +7741,7 @@ files = [ [extras] design = ["bayesian-optimization", "pygad", "pyswarms"] -dev = ["bayesian-optimization", "cma", "coverage", "devsim", "diff-cover", "dill", "gdstk", "grcwa", "ipython", "ipython", "jinja2", "jupyter", "memory_profiler", "mypy", "myst-parser", "nbconvert", "nbdime", "nbsphinx", "networkx", "openpyxl", "optax", "pre-commit", "psutil", "pydata-sphinx-theme", "pygad", "pylint", "pyswarms", "pytest", "pytest-cov", "pytest-env", "pytest-timeout", 
"pytest-xdist", "rtree", "ruff", "sax", "scikit-rf", "signac", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design", "sphinx-favicon", "sphinx-notfound-page", "sphinx-sitemap", "sphinx-tabs", "sphinxemoji", "tmm", "torch", "torch", "tox", "trimesh", "vtk", "zizmor"] +dev = ["bayesian-optimization", "cma", "coverage", "devsim", "diff-cover", "dill", "gdstk", "grcwa", "ipython", "ipython", "jinja2", "jupyter", "memory_profiler", "mypy", "myst-parser", "nbconvert", "nbdime", "nbsphinx", "networkx", "openpyxl", "optax", "pre-commit", "psutil", "pydata-sphinx-theme", "pygad", "pylint", "pyswarms", "pytest", "pytest-cov", "pytest-env", "pytest-order", "pytest-timeout", "pytest-xdist", "rtree", "ruff", "sax", "scikit-rf", "signac", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design", "sphinx-favicon", "sphinx-notfound-page", "sphinx-sitemap", "sphinx-tabs", "sphinxemoji", "tmm", "torch", "torch", "tox", "trimesh", "vtk", "zizmor"] docs = ["cma", "devsim", "gdstk", "grcwa", "ipython", "jinja2", "jupyter", "myst-parser", "nbconvert", "nbdime", "nbsphinx", "openpyxl", "optax", "pydata-sphinx-theme", "pylint", "sax", "signac", "sphinx", "sphinx-book-theme", "sphinx-copybutton", "sphinx-design", "sphinx-favicon", "sphinx-notfound-page", "sphinx-sitemap", "sphinx-tabs", "sphinxemoji", "tmm"] extras = ["tidy3d-extras"] gdstk = ["gdstk"] @@ -7761,4 +7756,4 @@ vtk = ["vtk"] [metadata] lock-version = "2.1" python-versions = ">=3.10,<3.14" -content-hash = "5c1131e43d860d4eb51f5fe8b308766a80877fb18a8ec82a774d28f777fc45d6" +content-hash = "f5dfe6490bd3f994e19bdc08e0319d077d0ae6049197416e43d6e50677023fa2" diff --git a/pyproject.toml b/pyproject.toml index d2a44e6559..5b8d4448ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,6 +65,7 @@ pytest-timeout = { version = "*", optional = true } pytest-xdist = "^3.6.1" pytest-cov = "^6.0.0" pytest-env = "^1.1.5" +pytest-order = { version = "^1.2.1", optional = true } tox = { version = "*", optional = true } diff-cover = { version = "*", optional = true } zizmor = { version = "*", optional = true } @@ -154,6 +155,7 @@ dev = [ 'pytest-xdist', 'pytest-env', 'pytest-cov', + 'pytest-order', 'rtree', 'ruff', 'sax', @@ -312,9 +314,11 @@ banned-module-level-imports = ["scipy", "matplotlib"] [tool.pytest.ini_options] # TODO: remove --assert=plain when https://github.com/scipy/scipy/issues/22236 is resolved -addopts = "--cov=tidy3d --doctest-modules -n auto --dist worksteal --assert=plain -m 'not numerical'" +addopts = "--cov=tidy3d --doctest-modules -n auto --dist worksteal --assert=plain -m 'not numerical and not perf'" markers = [ "numerical: marks numerical tests for adjoint gradients that require running simulations (deselect with '-m \"not numerical\"')", + "perf: marks tests which test the runtime of operations (deselect with '-m \"not perf\"')", + "slow: marks tests as slow (deselect with -m 'not slow')", ] env = ["MPLBACKEND=Agg", "OMP_NUM_THREADS=1", "TIDY3D_MICROWAVE__SUPPRESS_RF_LICENSE_WARNING=true"] doctest_optionflags = "NORMALIZE_WHITESPACE ELLIPSIS" diff --git a/scripts/profile_pytest.py b/scripts/profile_pytest.py new file mode 100755 index 0000000000..ddbb642018 --- /dev/null +++ b/scripts/profile_pytest.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python3 +"""Helper utilities for profiling ``pytest`` runs inside the Poetry env. 
+ +This script can: +* run the full test suite (default) while surfacing the slowest tests via ``--durations``; +* run in "debug" mode to execute only the first N collected tests; and +* wrap ``pytest`` in ``cProfile`` to identify the most expensive function calls. + +Examples:: + + python scripts/profile_pytest.py # full suite with slowest 25 tests listed + python scripts/profile_pytest.py --debug --debug-limit 10 + python scripts/profile_pytest.py --profile --profile-output results.prof + python scripts/profile_pytest.py -t tests/test_components/test_scene.py \ + --pytest-args "-k basic" + +Forward any additional ``pytest`` CLI flags via ``--pytest-args "..."`` and provide +explicit test targets with ``-t/--tests`` (defaults to the entire ``tests`` dir). +""" + +from __future__ import annotations + +import argparse +import re +import shlex +import shutil +import subprocess +import sys +from collections import defaultdict +from collections.abc import Iterable +from pathlib import Path + +try: + import pstats +except ImportError as exc: # pragma: no cover - stdlib module should exist + raise SystemExit("pstats from the standard library is required") from exc + +DURATION_LINE_RE = re.compile(r"^\s*(?P<secs>\d+(?:\.\d+)?)s\s+\w+\s+(?P<nodeid>\S+)\s*$") + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Profile pytest executions launched via Poetry.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "--debug", + action="store_true", + help="Run only a subset of collected tests (see --debug-limit).", + ) + parser.add_argument( + "--list-limit", + type=int, + default=30, + help="How many entries to show in aggregated duration summaries (set 0 for all).", + ) + parser.add_argument( + "--debug-limit", + type=int, + default=25, + help="Number of test node ids to execute when --debug is enabled.", + ) + parser.add_argument( + "--durations", + type=int, + default=0, + help="Pass-through value for pytest's --durations flag (use 0 for all tests).", + ) + parser.add_argument( + "--profile", + action="store_true", + help="Wrap pytest in cProfile and display the heaviest call sites afterward.", + ) + parser.add_argument( + "--profile-output", + default="results.prof", + help="Where to write the binary cProfile stats (used when --profile is set).", + ) + parser.add_argument( + "--profile-top", + type=int, + default=30, + help="How many rows of aggregated profile data to print.", + ) + parser.add_argument( + "--profile-sort", + choices=["cumulative", "tottime", "calls", "time"], + default="cumulative", + help="Sort order for the profile summary table.", + ) + parser.add_argument( + "-t", + "--tests", + action="append", + dest="tests", + metavar="PATH_OR_NODE", + help="Explicit pytest targets. Repeatable.", + ) + parser.add_argument( + "--pytest-args", + default="", + help="Extra pytest CLI args as a quoted string (e.g.
'--maxfail=1 -k smoke').", + ) + return parser.parse_args() + + +def ensure_poetry_available() -> None: + if shutil.which("poetry") is None: + raise SystemExit("'poetry' command not found in PATH.") + + +def build_pytest_base(profile: bool, profile_output: Path) -> list[str]: + base_cmd = ["poetry", "run"] + if profile: + base_cmd += [ + "python", + "-m", + "cProfile", + "-o", + str(profile_output.resolve()), + "-m", + "pytest", + ] + else: + base_cmd.append("pytest") + return base_cmd + + +def collect_node_ids(extra_args: Iterable[str], tests: Iterable[str]) -> list[str]: + cmd = ["poetry", "run", "pytest", "--collect-only", "-q"] + cmd.extend(extra_args) + cmd.extend(tests) + print(f"Collecting tests via: {' '.join(shlex.quote(part) for part in cmd)}") + result = subprocess.run(cmd, capture_output=True, text=True, check=False) + sys.stdout.write(result.stdout) + sys.stderr.write(result.stderr) + if result.returncode != 0: + raise SystemExit(result.returncode) + + node_ids: list[str] = [] + for line in result.stdout.splitlines(): + stripped = line.strip() + if not stripped or stripped.startswith(("<", "collected ")): + continue + node_ids.append(stripped) + if not node_ids: + raise SystemExit("No tests collected; check your --tests / --pytest-args filters.") + return node_ids + + +def summarize_profile(stats_path: Path, sort: str, top: int) -> None: + if not stats_path.exists(): + print(f"Profile file {stats_path} not found; skipping summary.") + return + stats = pstats.Stats(str(stats_path)) + stats.sort_stats(sort) + print("\nTop profiled call sites (via cProfile):") + stats.print_stats(top) + + +def extract_durations_from_output(output: str) -> list[tuple[float, str]]: + """Parse pytest --durations lines from stdout.""" + + durations: list[tuple[float, str]] = [] + for line in output.splitlines(): + match = DURATION_LINE_RE.match(line) + if not match: + continue + secs = float(match.group("secs")) + nodeid = match.group("nodeid") + durations.append((secs, nodeid)) + return durations + + +def print_aggregated_durations( + durations: list[tuple[float, str]], + list_limit: int, +) -> None: + """Print durations aggregated by file and by test (collapsing parametrizations).""" + + if not durations: + print("\n[durations] no --durations lines found in pytest output.") + return + + by_file: dict[str, float] = defaultdict(float) + by_test: dict[str, float] = defaultdict(float) + + for secs, nodeid in durations: + base = nodeid.split("[", 1)[0] + file_name = base.split("::", 1)[0] + by_file[file_name] += secs + by_test[base] += secs + + def _print_section(title: str, mapping: dict[str, float]) -> None: + print(f"\nAggregated durations ({title}):") + items = sorted(mapping.items(), key=lambda kv: kv[1], reverse=True) + if list_limit > 0: + items = items[:list_limit] + for name, total in items: + print(f"{total:8.02f}s {name}") + + _print_section("by file", by_file) + _print_section("by test (parametrizations combined)", by_test) + + +def truncate_pytest_durations_output(output: str, limit: int) -> str: + """Keep pytest's duration section header, but show only the top `limit` lines.""" + lines = output.splitlines() + out_lines = [] + in_durations_section = False + kept = 0 + + for line in lines: + if "slowest" in line and "durations" in line: + in_durations_section = True + kept = 0 + out_lines.append(line) + continue + + if in_durations_section: + # Stop after we've shown N durations or reached next blank section + if not line.strip(): + in_durations_section = False + elif kept >= limit: + 
continue + else: + kept += 1 + + out_lines.append(line) + return "\n".join(out_lines) + + +def export_to_file(result, args, filtered_stdout, durations): + sys.stdout.write(filtered_stdout) + sys.stderr.write(result.stderr) + + # Write the filtered output to a file as well + output_file = "pytest_profile_stats.txt" + results_path = Path(output_file) + results_path.write_text(filtered_stdout) + + if durations: + print_aggregated_durations(durations, args.list_limit) + + with results_path.open("a") as f: + f.write("\n\n[Aggregated Durations]\n") + for secs, nodeid in durations: + f.write(f"{secs:.2f}s {nodeid}\n") + print(f"Stats were written to {output_file}") + + +def main() -> int: + args = parse_args() + ensure_poetry_available() + + if args.debug and args.debug_limit <= 0: + raise SystemExit("--debug-limit must be a positive integer.") + + tests = args.tests or ["tests"] + extra_args = shlex.split(args.pytest_args) + + # Handle debug collection (collect-only) + if args.debug: + collected = collect_node_ids(extra_args, tests) + pytest_targets = collected[: args.debug_limit] + print(f"\nDebug mode: running the first {len(pytest_targets)} collected test(s).") + else: + pytest_targets = tests + + # Build the full pytest command + base_cmd = build_pytest_base(args.profile, Path(args.profile_output)) + pytest_cmd = base_cmd + extra_args + if args.durations is not None: + pytest_cmd.append(f"--durations={args.durations}") + pytest_cmd.extend(pytest_targets) + + print(f"\nExecuting: {' '.join(shlex.quote(part) for part in pytest_cmd)}\n") + + # Run pytest + result = subprocess.run( + pytest_cmd, + check=False, + text=True, + capture_output=True, + ) + + # Extract and truncate outputs + filtered_stdout = truncate_pytest_durations_output(result.stdout, args.list_limit) + durations = extract_durations_from_output(result.stdout) if args.durations is not None else [] + + # Print once and export + export_to_file(result, args, filtered_stdout, durations) + + # Profile summary (if enabled) + if args.profile and result.returncode == 0: + summarize_profile(Path(args.profile_output), args.profile_sort, args.profile_top) + + return result.returncode + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tests/test_components/autograd/test_autograd.py b/tests/test_components/autograd/test_autograd.py index 8621eaded7..de044b48e4 100644 --- a/tests/test_components/autograd/test_autograd.py +++ b/tests/test_components/autograd/test_autograd.py @@ -41,13 +41,12 @@ numerical: adjoint with an extra numerical derivative test after speed: pipeline with cProfile to analyze performance """ +pytestmark = pytest.mark.order(0) # make it faster to toggle this TEST_CUSTOM_MEDIUM_SPEED = False TEST_POLYSLAB_SPEED = False -# whether to run numerical gradient tests, off by default because it runs real simulations -RUN_NUMERICAL = False _NUMERICAL_COMBINATION = ("polyslab", "mode") TEST_MODES = ("pipeline", "adjoint", "speed") @@ -239,7 +238,6 @@ def emulated_run_fwd(simulation, task_name, **run_kwargs) -> td.SimulationData: def emulated_run_bwd(simulation, task_name, **run_kwargs) -> td.SimulationData: """What gets called instead of ``web/api/autograd/autograd.py::_run_tidy3d_bwd``.""" - task_name_fwd = "".join(task_name.partition("_adjoint")[:-2]) # run the adjoint sim @@ -584,8 +582,8 @@ def field_vol_postprocess_fn(sim_data, mnt_data): for _, val in mnt_data.field_components.items(): value = value + abs(anp.sum(val.values)) intensity = 
anp.nan_to_num(anp.sum(sim_data.get_intensity(mnt_data.monitor.name).values)) - value += intensity - value += anp.sum(mnt_data.flux.values) + value = value + intensity + value = value + anp.sum(mnt_data.flux.values) return value field_point = td.FieldMonitor( @@ -598,8 +596,8 @@ def field_vol_postprocess_fn(sim_data, mnt_data): def field_point_postprocess_fn(sim_data, mnt_data): value = 0.0 for _, val in mnt_data.field_components.items(): - value += abs(anp.sum(abs(val.values))) - value += anp.sum(sim_data.get_intensity(mnt_data.monitor.name).values) + value = value + abs(anp.sum(abs(val.values))) + value = value + anp.sum(sim_data.get_intensity(mnt_data.monitor.name).values) return value return { @@ -658,7 +656,7 @@ def plot_sim(sim: td.Simulation, plot_eps: bool = True) -> None: args = [("polyslab", "mode")] -# args = [("polyslab", "mode")] +ASYNC_TEST_ARGS = args[:2] def get_functions(structure_key: str, monitor_key: str) -> dict[str, typing.Callable]: @@ -727,7 +725,7 @@ def test_polyslab_axis_ops(axis): basis_vecs = p.edge_basis_vectors(edges=edges) -@pytest.mark.skipif(not RUN_NUMERICAL, reason="Numerical gradient tests runs through web API.") +@pytest.mark.numerical @pytest.mark.parametrize("structure_key, monitor_key", (_NUMERICAL_COMBINATION,)) def test_autograd_numerical(structure_key, monitor_key): """Test an objective function through tidy3d autograd.""" @@ -826,6 +824,7 @@ def objective(*args): @pytest.mark.parametrize("structure_key, monitor_key", args) +@pytest.mark.slow def test_autograd_objective(use_emulated_run, structure_key, monitor_key): """Test an objective function through tidy3d autograd.""" @@ -858,31 +857,58 @@ def objective(*args): assert anp.all(grad != 0.0), "some gradients are 0" -@pytest.mark.parametrize("structure_key, monitor_key", args) -@pytest.mark.parametrize("use_task_names", [True, False]) -def test_autograd_async(use_emulated_run, structure_key, monitor_key, use_task_names): - """Test an objective function through tidy3d autograd.""" +def _compare_async_vs_sync(fn_dicts, local_gradient) -> None: + """Compare async vs non-async autograd for a subset of structure/monitor pairs.""" - fn_dict = get_functions(structure_key, monitor_key) - make_sim = fn_dict["sim"] - postprocess = fn_dict["postprocess"] + # synchronous objective: run() one sim after another + def objective_sync(*params): + total = 0.0 + for i, fn_dict in enumerate(fn_dicts): + sim = fn_dict["sim"](*params) + data = run( + sim, task_name=f"autograd_sync_{i}", verbose=False, local_gradient=local_gradient + ) + total = total + fn_dict["postprocess"](data) + return total - task_names = {"test_a", "adjoint", "_test"} + def objective_async(*params): + sims = {} + for i, fn_dict in enumerate(fn_dicts): + sim = fn_dict["sim"](*params) + key = f"autograd_{i}" + sims[key] = sim - def objective(*args): - if use_task_names: - sims = {task_name: make_sim(*args) for task_name in task_names} - else: - sims = [make_sim(*args)] * len(task_names) - batch_data = run_async(sims, verbose=False) - value = 0.0 - for _, sim_data in batch_data.items(): - value += postprocess(sim_data) - return value + batch_data = run_async(sims, verbose=False, local_gradient=local_gradient) - val, grad = ag.value_and_grad(objective)(params0) - print(val, grad) - assert anp.all(grad != 0.0), "some gradients are 0" + total = 0.0 + for i, fn_dict in enumerate(fn_dicts): + key = f"autograd_{i}" + total = total + fn_dict["postprocess"](batch_data[key]) + return total + + val_sync, grad_sync = 
ag.value_and_grad(objective_sync)(params0) + val_async, grad_async = ag.value_and_grad(objective_async)(params0) + + val_sync = float(val_sync) + val_async = float(val_async) + grad_sync = np.asarray(grad_sync) + grad_async = np.asarray(grad_async) + + np.testing.assert_allclose(val_async, val_sync, rtol=1e-8, atol=1e-10) + np.testing.assert_allclose(grad_async, grad_sync, rtol=1e-6, atol=1e-8) + + +@pytest.mark.slow +@pytest.mark.parametrize("local_gradient", [True, False]) +def test_autograd_async(use_emulated_run, local_gradient): + """Async autograd for a small subset; must match non-async autograd.""" + + # only use two structure/monitor combinations to keep this test cheap + fn_dicts = [ + get_functions(structure_key, monitor_key) for structure_key, monitor_key in ASYNC_TEST_ARGS + ] + + _compare_async_vs_sync(fn_dicts, local_gradient) class TestTupleGrads: @@ -962,11 +988,9 @@ def obj(center: tuple, size: tuple) -> float: assert not np.allclose(dp_dsize, 0) -@pytest.mark.parametrize("structure_key, monitor_key", args) -def test_autograd_async_some_zero_grad(use_emulated_run, structure_key, monitor_key): +def test_autograd_async_some_zero_grad(use_emulated_run): """Test objective where only some simulations in batch have adjoint sources.""" - - fn_dict = get_functions(structure_key, monitor_key) + fn_dict = get_functions(args[0][0], args[0][1]) make_sim = fn_dict["sim"] postprocess = fn_dict["postprocess"] @@ -1006,6 +1030,7 @@ def objective(*args): grad = ag.grad(objective)(params0) +@pytest.mark.perf def test_autograd_speed_num_structures(use_emulated_run): """Test an objective function through tidy3d autograd.""" @@ -1108,8 +1133,9 @@ def objective_cylinder(params): @pytest.mark.parametrize("structure_key, monitor_key", args) -def test_autograd_server(use_emulated_run, structure_key, monitor_key): - """Test an objective function through tidy3d autograd.""" +@pytest.mark.slow +def test_autograd_local(use_emulated_run, structure_key, monitor_key): + """Test an objective function through tidy3d autograd with local gradients.""" fn_dict = get_functions(structure_key, monitor_key) make_sim = fn_dict["sim"] @@ -1118,7 +1144,7 @@ def test_autograd_server(use_emulated_run, structure_key, monitor_key): def objective(*args): """Objective function.""" sim = make_sim(*args) - data = run(sim, task_name="autograd_test", verbose=False, local_gradient=False) + data = run(sim, task_name="autograd_test", verbose=False, local_gradient=True) value = postprocess(data) return value @@ -1126,28 +1152,6 @@ def objective(*args): assert np.all(np.abs(grad) > 0), "some gradients are 0" -@pytest.mark.parametrize("structure_key, monitor_key", args) -def test_autograd_async_server(use_emulated_run, structure_key, monitor_key): - """Test an async objective function through tidy3d autograd.""" - - fn_dict = get_functions(structure_key, monitor_key) - make_sim = fn_dict["sim"] - postprocess = fn_dict["postprocess"] - - def objective(*args): - """Objective function.""" - sim = make_sim(*args) - sims = {"autograd_test1": sim, "autograd_test2": sim} - batch_data = run_async(sims, verbose=False, local_gradient=False) - value = 0.0 - for _, sim_data in batch_data.items(): - value = value + postprocess(sim_data) - return value - - val, grad = ag.value_and_grad(objective)(params0) - assert np.all(np.abs(grad) > 0), "some gradients are 0" - - @pytest.mark.parametrize("structure_key", ("custom_med",)) def test_sim_full_ops(structure_key): """make sure the autograd operations don't error on a simulation containing 
everything.""" @@ -2027,6 +2031,7 @@ def f(eps_inf, poles): assert np.allclose(grads_computed[field_path], np.conj(grad_poles[i][j])) +@pytest.mark.slow def test_custom_sellmeier(monkeypatch): """Test that computed CustomSellmeier derivatives match analytic mapping.""" @@ -2531,8 +2536,8 @@ def objective(params): print(g) -@pytest.mark.parametrize("structure_key", structure_keys_) -def test_multi_frequency_equivalence(use_emulated_run, structure_key): +@pytest.mark.slow +def test_multi_frequency_equivalence(use_emulated_run): """Test an objective function through tidy3d autograd.""" def objective_indi(params, structure_key) -> float: @@ -2562,6 +2567,7 @@ def objective_multi(params, structure_key) -> float: amps = get_amps(sim_data, "multi").sel(mode_index=0, direction="+") return power(amps) + structure_key = structure_keys_[0] params0_ = params0 + 1.0 # J_indi = objective_indi(params0_, structure_key) @@ -2989,10 +2995,9 @@ def objective(params): assert anp.all(grad != 0.0), "some gradients are 0 for conductivity-only test" -@pytest.mark.parametrize("structure_key, monitor_key", args) -def test_vjp_nan(use_emulated_run, structure_key, monitor_key): +def test_vjp_nan(use_emulated_run): """Test vjp data that has nan in it is flagged as an error.""" - + structure_key, monitor_key = args[0] fn_dict = get_functions(structure_key, monitor_key) make_sim = fn_dict["sim"] postprocess = fn_dict["postprocess"] diff --git a/tests/test_components/test_IO.py b/tests/test_components/test_IO.py index 83114f5678..fda5e6f890 100644 --- a/tests/test_components/test_IO.py +++ b/tests/test_components/test_IO.py @@ -176,16 +176,18 @@ def test_1a_simulation_load_export2(tmp_path): assert SIM2 == SIM3, "original and loaded simulations are not the same" +@pytest.mark.perf def test_validation_speed(tmp_path): sizes_bytes = [] times_sec = [] path = str(tmp_path / "simulation.json") _ = SIM - N_tests = 10 + N_tests = 2 # may be increased temporarily, makes it slow for routine tests + max_structures = np.log10(2) # may be increased temporarily, makes it slow for routine tests # adjust as needed, keeping small to speed tests up - num_structures = np.logspace(0, 2, N_tests).astype(int) + num_structures = np.logspace(0, max_structures, N_tests).astype(int) for n in num_structures: new_structures = [] diff --git a/tests/test_components/test_custom.py b/tests/test_components/test_custom.py index e52adf321e..3b13d5428e 100644 --- a/tests/test_components/test_custom.py +++ b/tests/test_components/test_custom.py @@ -718,6 +718,7 @@ def verify_custom_dispersive_medium_methods(mat, reduced_fields): @pytest.mark.parametrize("unstructured", [False, True]) +@pytest.mark.slow def test_custom_pole_residue(unstructured): """Custom pole residue medium.""" seed = 98345 @@ -776,6 +777,7 @@ def test_custom_pole_residue(unstructured): @pytest.mark.parametrize("unstructured", [False, True]) +@pytest.mark.slow def test_custom_sellmeier(unstructured): """Custom Sellmeier medium.""" seed = 897245 @@ -838,6 +840,7 @@ def test_custom_sellmeier(unstructured): @pytest.mark.parametrize("unstructured", [False, True]) +@pytest.mark.slow def test_custom_lorentz(unstructured): """Custom Lorentz medium.""" seed = 31342 @@ -991,6 +994,7 @@ def test_custom_debye(unstructured): @pytest.mark.parametrize("unstructured", [True]) +@pytest.mark.slow def test_custom_anisotropic_medium(unstructured): """Custom anisotropic medium.""" seed = 43243 diff --git a/tests/test_components/test_eme.py b/tests/test_components/test_eme.py index 058ae384ac..5dc82272c7 
100644 --- a/tests/test_components/test_eme.py +++ b/tests/test_components/test_eme.py @@ -904,6 +904,7 @@ def _get_eme_mode_solver_data(num_sweep=0): ) +@pytest.mark.slow def _get_eme_field_data(num_sweep=0): dataset = _get_eme_field_dataset(num_sweep=num_sweep) kwargs = dataset.field_components @@ -971,6 +972,7 @@ def _get_eme_port_modes(num_sweep=0): return mode_data.updated_copy(n_complex=n_complex, **kwargs) +@pytest.mark.slow def test_eme_sim_data(): sim = make_eme_sim() mode_monitor_data = _get_eme_mode_solver_data() diff --git a/tests/test_components/test_scene.py b/tests/test_components/test_scene.py index 09667b6030..bad9493d03 100644 --- a/tests/test_components/test_scene.py +++ b/tests/test_components/test_scene.py @@ -9,7 +9,7 @@ import pytest import tidy3d as td -from tidy3d.components.scene import MAX_NUM_MEDIUMS +from tidy3d.components import scene from tidy3d.components.viz import STRUCTURE_EPS_CMAP, STRUCTURE_EPS_CMAP_R from tidy3d.exceptions import SetupError @@ -18,6 +18,7 @@ SCENE = td.Scene() SCENE_FULL = SIM_FULL.scene +TEST_MAX_NUM_MEDIUMS = 3 def test_scene_init(): @@ -200,11 +201,11 @@ def test_structure_eps_color_mapping(): assert np.allclose(pp_max_reverse.facecolor, expected_max_reverse) -def test_num_mediums(): +def test_num_mediums(monkeypatch): """Make sure we error if too many mediums supplied.""" - + monkeypatch.setattr(scene, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS) structures = [] - for i in range(MAX_NUM_MEDIUMS): + for i in range(TEST_MAX_NUM_MEDIUMS): structures.append( td.Structure(geometry=td.Box(size=(1, 1, 1)), medium=td.Medium(permittivity=i + 1)) ) diff --git a/tests/test_components/test_simulation.py b/tests/test_components/test_simulation.py index 3fdc73f02f..e691f68ffe 100644 --- a/tests/test_components/test_simulation.py +++ b/tests/test_components/test_simulation.py @@ -12,8 +12,7 @@ from matplotlib.testing.compare import compare_images import tidy3d as td -from tidy3d.components import simulation -from tidy3d.components.scene import MAX_NUM_MEDIUMS +from tidy3d.components import scene, simulation from tidy3d.components.simulation import MAX_NUM_SOURCES from tidy3d.exceptions import SetupError, Tidy3dError, Tidy3dKeyError from tidy3d.plugins.mode import ModeSolver @@ -29,6 +28,7 @@ SIM = td.Simulation(size=(1, 1, 1), run_time=1e-12, grid_spec=td.GridSpec(wavelength=1.0)) RTOL = 0.01 +TEST_MAX_NUM_MEDIUMS = 3 def test_sim_init(): @@ -1694,12 +1694,10 @@ def test_sim_validate_structure_bounds_pml(box_length, absorb_type, log_level): def test_num_mediums(monkeypatch): """Make sure we error if too many mediums supplied.""" - - max_num_mediums = 10 - monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", max_num_mediums) + monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS) structures = [] grid_spec = td.GridSpec.auto(wavelength=1.0) - for i in range(max_num_mediums): + for i in range(TEST_MAX_NUM_MEDIUMS): structures.append( td.Structure(geometry=td.Box(size=(1, 1, 1)), medium=td.Medium(permittivity=i + 1)) ) @@ -3226,9 +3224,9 @@ def test_advanced_material_intersection(): sim = sim.updated_copy(structures=[struct1, struct2]) -def test_num_lumped_elements(): +def test_num_lumped_elements(monkeypatch): """Make sure we error if too many lumped elements supplied.""" - + monkeypatch.setattr(simulation, "MAX_NUM_MEDIUMS", TEST_MAX_NUM_MEDIUMS) resistor = td.LumpedResistor( size=(0, 1, 2), center=(0, 0, 0), name="R1", voltage_axis=2, resistance=75 ) @@ -3238,7 +3236,7 @@ def test_num_lumped_elements(): size=(5, 5, 5), 
grid_spec=grid_spec, structures=[], - lumped_elements=[resistor] * MAX_NUM_MEDIUMS, + lumped_elements=[resistor] * TEST_MAX_NUM_MEDIUMS, run_time=1e-12, ) with pytest.raises(pydantic.ValidationError): @@ -3246,7 +3244,7 @@ def test_num_lumped_elements(): size=(5, 5, 5), grid_spec=grid_spec, structures=[], - lumped_elements=[resistor] * (MAX_NUM_MEDIUMS + 1), + lumped_elements=[resistor] * (TEST_MAX_NUM_MEDIUMS + 1), run_time=1e-12, ) @@ -3748,7 +3746,6 @@ def test_messages_contain_object_names(): def test_structures_per_medium(monkeypatch): """Test if structures that share the same medium warn or error appropriately.""" - import tidy3d.components.scene as scene # Set low thresholds to keep the test fast; ensure len(structures) > MAX to avoid early return monkeypatch.setattr(scene, "WARN_STRUCTURES_PER_MEDIUM", 2) diff --git a/tests/test_package/test_parametric_variants.py b/tests/test_package/test_parametric_variants.py index ddd0d94bbc..f514ab7c3a 100644 --- a/tests/test_package/test_parametric_variants.py +++ b/tests/test_package/test_parametric_variants.py @@ -32,7 +32,8 @@ def test_graphene_defaults(): _ = graphene.numerical_conductivity(freqs) -@pytest.mark.parametrize("rng_seed", np.arange(0, 15)) +@pytest.mark.parametrize("rng_seed", np.arange(0, 8)) +@pytest.mark.slow def test_graphene(rng_seed): """test graphene for range of physical parameters""" rng = default_rng(rng_seed) diff --git a/tests/test_plugins/test_design.py b/tests/test_plugins/test_design.py index 376c68849b..5d05f2610c 100644 --- a/tests/test_plugins/test_design.py +++ b/tests/test_plugins/test_design.py @@ -18,7 +18,7 @@ SWEEP_METHODS = { "grid": tdd.MethodGrid(), "monte_carlo": tdd.MethodMonteCarlo(num_points=5, seed=1), - "bay_opt": tdd.MethodBayOpt(initial_iter=5, n_iter=2, seed=1), + "bay_opt": tdd.MethodBayOpt(initial_iter=3, n_iter=2, seed=2), "gen_alg": tdd.MethodGenAlg( solutions_per_pop=6, n_generations=2, @@ -323,15 +323,15 @@ def init_design_space(sweep_method): radius_variable = tdd.ParameterFloat( name="radius", span=(0, 1.5), - num_points=5, # note: only used for MethodGrid + num_points=3, # note: only used for MethodGrid ) num_spheres_variable = tdd.ParameterInt( name="num_spheres", - span=(0, 3), + span=(0, 2), ) - tag_variable = tdd.ParameterAny(name="tag", allowed_values=("tag1", "tag2", "tag3")) + tag_variable = tdd.ParameterAny(name="tag", allowed_values=("tag1", "tag2")) design_space = tdd.DesignSpace( parameters=[radius_variable, num_spheres_variable, tag_variable], @@ -344,6 +344,7 @@ def init_design_space(sweep_method): @pytest.mark.parametrize("sweep_method", SWEEP_METHODS.values()) +@pytest.mark.slow def test_sweep(sweep_method, monkeypatch): # Problem, simulate scattering cross section of sphere ensemble # simulation consists of `num_spheres` spheres of radius `radius`. 
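Editor's note: the ``test_scene.py`` and ``test_simulation.py`` changes above share one speed-up pattern: instead of building enough objects to hit the real ``MAX_NUM_MEDIUMS``, the tests ``monkeypatch`` the module-level limit down to a small ``TEST_MAX_NUM_MEDIUMS``. A minimal, self-contained sketch of that pattern is shown below; the ``limits`` container and ``validate_num_mediums`` helper are hypothetical stand-ins, not tidy3d API::

    import pytest

    class limits:
        """Hypothetical stand-in for a module that exposes a large validation limit."""
        MAX_NUM_MEDIUMS = 65536  # made-up production value

    def validate_num_mediums(mediums) -> None:
        # read the limit through the container at call time so a patched value is seen
        if len(mediums) > limits.MAX_NUM_MEDIUMS:
            raise ValueError("too many mediums")

    def test_num_mediums(monkeypatch):
        # shrink the limit so the error path only needs 4 objects, not tens of thousands
        monkeypatch.setattr(limits, "MAX_NUM_MEDIUMS", 3)
        validate_num_mediums(["m1", "m2", "m3"])  # at the patched limit: passes
        with pytest.raises(ValueError):
            validate_num_mediums(["m1", "m2", "m3", "m4"])  # one over: raises

This is also why ``test_scene.py`` now imports the ``scene`` module instead of the bare constant: ``monkeypatch.setattr`` patches the module attribute, and a value imported by name would not see the patch.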
diff --git a/tests/test_plugins/test_invdes.py b/tests/test_plugins/test_invdes.py index f3ba2f3b96..361019448c 100644 --- a/tests/test_plugins/test_invdes.py +++ b/tests/test_plugins/test_invdes.py @@ -368,6 +368,7 @@ def test_continue_run_fns(use_emulated_run): # noqa: F811 ) +@pytest.mark.slow def test_continue_run_from_file(use_emulated_run): # noqa: F811 """Test continuing an already run inverse design from file.""" result_orig = make_result(use_emulated_run) diff --git a/tests/test_web/test_webapi.py b/tests/test_web/test_webapi.py index 4fdd280bf0..211fc2d158 100644 --- a/tests/test_web/test_webapi.py +++ b/tests/test_web/test_webapi.py @@ -1061,18 +1061,22 @@ def test_job_run_accepts_pathlikes(monkeypatch, tmp_path, path_builder): [_pathlib_builder, _posix_builder, _str_builder, _fspath_builder], ids=["pathlib.Path", "posixpath_str", "str", "PathLike"], ) +@pytest.mark.slow def test_batch_run_accepts_pathlike_dir(monkeypatch, tmp_path, dir_builder): """Batch.run(path_dir=...) accepts any PathLike directory location.""" - sims = {"A": make_sim(), "B": make_sim()} + sims = {"A": make_sim()} out_dir = dir_builder(tmp_path, "batch_out") # Map task_ids to sims: upload() is patched to return task_name, which for dict input # corresponds to the dict keys ("A", "B"), so we map those. - apply_common_patches(monkeypatch, tmp_path, taskid_to_sim={"A": sims["A"], "B": sims["B"]}) + apply_common_patches(monkeypatch, tmp_path, taskid_to_sim={"A": sims["A"]}) b = Batch(simulations=sims, folder_name=PROJECT_NAME) b.run(path_dir=out_dir) - # Directory created and two .hdf5 outputs produced + # Directory created and .hdf5 output produced out_dir_str = os.fspath(out_dir) assert os.path.isdir(out_dir_str) + + batch_file = Path(out_dir) / "batch.hdf5" + assert batch_file.is_file()
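Editor's note: as a closing illustration of what ``scripts/profile_pytest.py`` does with pytest's ``--durations`` report, here is a self-contained sketch of the parse-and-aggregate step (same regex shape and grouping as the script); the report lines and timings below are made up::

    import re
    from collections import defaultdict

    # same shape as DURATION_LINE_RE in scripts/profile_pytest.py
    LINE_RE = re.compile(r"^\s*(?P<secs>\d+(?:\.\d+)?)s\s+\w+\s+(?P<nodeid>\S+)\s*$")

    # fabricated --durations report lines, for illustration only
    sample_report = """
    12.31s call tests/test_components/test_custom.py::test_custom_pole_residue[True]
    8.02s call tests/test_components/test_custom.py::test_custom_pole_residue[False]
    5.50s call tests/test_web/test_webapi.py::test_batch_run_accepts_pathlike_dir
    """

    by_file: dict[str, float] = defaultdict(float)
    by_test: dict[str, float] = defaultdict(float)
    for line in sample_report.splitlines():
        match = LINE_RE.match(line)
        if not match:
            continue
        secs = float(match.group("secs"))
        base = match.group("nodeid").split("[", 1)[0]  # collapse parametrizations
        by_file[base.split("::", 1)[0]] += secs        # total per test file
        by_test[base] += secs                          # total per test function

    for name, total in sorted(by_test.items(), key=lambda kv: kv[1], reverse=True):
        print(f"{total:8.2f}s {name}")

With this input the two parametrized runs of ``test_custom_pole_residue`` collapse into a single 20.33 s entry, which is the per-test view the script's aggregated summary (capped by ``--list-limit``) prints.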