From 15bf0d792e3c7693c655961fe7f7c918f3accee6 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 00:24:20 +0000 Subject: [PATCH 01/14] feat: Add Task processor - Port `flagsmith/flagsmith-task-processor` - Port `waitfordb` management command - Introduce PostgreSQL for tests - Bump Poetry from 2.0.1 to 2.1.1 --- .github/workflows/python-test.yml | 21 +- Makefile | 40 +- docker/docker-compose.local.yml | 19 + poetry.lock | 267 ++++++- pyproject.toml | 15 +- settings/dev.py | 29 +- src/common/core/management/commands/start.py | 26 +- .../core/management/commands/waitfordb.py | 87 +++ src/task_processor/__init__.py | 0 src/task_processor/admin.py | 38 + src/task_processor/apps.py | 18 + src/task_processor/decorators.py | 206 ++++++ src/task_processor/exceptions.py | 10 + src/task_processor/health.py | 44 ++ src/task_processor/managers.py | 18 + src/task_processor/migrations/0001_initial.py | 44 ++ .../migrations/0002_healthcheckmodel.py | 21 + .../migrations/0003_add_completed_to_task.py | 22 + .../migrations/0004_recreate_task_indexes.py | 43 ++ ...005_update_conditional_index_conditions.py | 45 ++ .../migrations/0006_auto_20230221_0802.py | 45 ++ .../migrations/0007_add_is_locked.py | 23 + .../0008_add_get_task_to_process_function.py | 31 + ...009_add_recurring_task_run_first_run_at.py | 18 + .../migrations/0010_task_priority.py | 27 + ...11_add_priority_to_get_tasks_to_process.py | 27 + .../0012_add_locked_at_and_timeout.py | 39 ++ src/task_processor/migrations/__init__.py | 0 .../migrations/helpers/__init__.py | 9 + .../migrations/helpers/postgres_helpers.py | 41 ++ .../0008_get_recurring_tasks_to_process.sql | 30 + .../sql/0008_get_tasks_to_process.sql | 30 + .../sql/0011_get_tasks_to_process.sql | 30 + .../0012_get_recurringtasks_to_process.sql | 33 + src/task_processor/migrations/sql/__init__.py | 0 src/task_processor/models.py | 245 +++++++ src/task_processor/monitoring.py | 12 + src/task_processor/processor.py | 138 ++++ src/task_processor/py.typed | 0 src/task_processor/serializers.py | 7 + src/task_processor/task_registry.py | 82 +++ src/task_processor/task_run_method.py | 7 + src/task_processor/tasks.py | 71 ++ src/task_processor/threads.py | 112 +++ src/task_processor/types.py | 18 + src/task_processor/urls.py | 5 + src/task_processor/utils.py | 71 ++ src/task_processor/views.py | 20 + tests/unit/task_processor/__init__.py | 0 tests/unit/task_processor/conftest.py | 45 ++ .../test_unit_task_processor_decorators.py | 229 ++++++ .../test_unit_task_processor_health.py | 43 ++ .../test_unit_task_processor_models.py | 71 ++ .../test_unit_task_processor_monitoring.py | 38 + .../test_unit_task_processor_processor.py | 654 ++++++++++++++++++ .../test_unit_task_processor_tasks.py | 193 ++++++ .../test_unit_task_processor_threads.py | 46 ++ 57 files changed, 3475 insertions(+), 28 deletions(-) create mode 100644 docker/docker-compose.local.yml create mode 100644 src/common/core/management/commands/waitfordb.py create mode 100644 src/task_processor/__init__.py create mode 100644 src/task_processor/admin.py create mode 100644 src/task_processor/apps.py create mode 100644 src/task_processor/decorators.py create mode 100644 src/task_processor/exceptions.py create mode 100644 src/task_processor/health.py create mode 100644 src/task_processor/managers.py create mode 100644 src/task_processor/migrations/0001_initial.py create mode 100644 src/task_processor/migrations/0002_healthcheckmodel.py create mode 100644 src/task_processor/migrations/0003_add_completed_to_task.py create mode 
100644 src/task_processor/migrations/0004_recreate_task_indexes.py create mode 100644 src/task_processor/migrations/0005_update_conditional_index_conditions.py create mode 100644 src/task_processor/migrations/0006_auto_20230221_0802.py create mode 100644 src/task_processor/migrations/0007_add_is_locked.py create mode 100644 src/task_processor/migrations/0008_add_get_task_to_process_function.py create mode 100644 src/task_processor/migrations/0009_add_recurring_task_run_first_run_at.py create mode 100644 src/task_processor/migrations/0010_task_priority.py create mode 100644 src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py create mode 100644 src/task_processor/migrations/0012_add_locked_at_and_timeout.py create mode 100644 src/task_processor/migrations/__init__.py create mode 100644 src/task_processor/migrations/helpers/__init__.py create mode 100644 src/task_processor/migrations/helpers/postgres_helpers.py create mode 100644 src/task_processor/migrations/sql/0008_get_recurring_tasks_to_process.sql create mode 100644 src/task_processor/migrations/sql/0008_get_tasks_to_process.sql create mode 100644 src/task_processor/migrations/sql/0011_get_tasks_to_process.sql create mode 100644 src/task_processor/migrations/sql/0012_get_recurringtasks_to_process.sql create mode 100644 src/task_processor/migrations/sql/__init__.py create mode 100644 src/task_processor/models.py create mode 100644 src/task_processor/monitoring.py create mode 100644 src/task_processor/processor.py create mode 100644 src/task_processor/py.typed create mode 100644 src/task_processor/serializers.py create mode 100644 src/task_processor/task_registry.py create mode 100644 src/task_processor/task_run_method.py create mode 100644 src/task_processor/tasks.py create mode 100644 src/task_processor/threads.py create mode 100644 src/task_processor/types.py create mode 100644 src/task_processor/urls.py create mode 100644 src/task_processor/utils.py create mode 100644 src/task_processor/views.py create mode 100644 tests/unit/task_processor/__init__.py create mode 100644 tests/unit/task_processor/conftest.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_decorators.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_health.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_models.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_monitoring.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_processor.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_tasks.py create mode 100644 tests/unit/task_processor/test_unit_task_processor_threads.py diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml index ce72836..4d72b0a 100644 --- a/.github/workflows/python-test.yml +++ b/.github/workflows/python-test.yml @@ -12,6 +12,15 @@ jobs: matrix: python-version: ["3.11", "3.12"] + services: + postgres: + image: postgres:15.5-alpine + env: + POSTGRES_PASSWORD: password + POSTGRES_DB: flagsmith + ports: ['5432:5432'] + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 + steps: - uses: actions/checkout@v4 @@ -23,13 +32,17 @@ jobs: run: pipx install poetry - name: Install Dependencies - run: poetry install --with dev + env: + opts: --with dev + run: make install-packages - name: Check for missing migrations - run: poetry run python manage.py makemigrations --no-input --dry-run --check + env: + opts: --no-input --dry-run --check + 
run: make django-make-migrations - name: Check for new typing errors - run: poetry run mypy . + run: make typecheck - name: Run Tests - run: poetry run pytest + run: make test diff --git a/Makefile b/Makefile index e3fb283..80629a6 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,9 @@ -POETRY_VERSION ?= 2.0.1 +.EXPORT_ALL_VARIABLES: + +POETRY_VERSION ?= 2.1.1 + +COMPOSE_FILE ?= docker/docker-compose.local.yml +COMPOSE_PROJECT_NAME ?= flagsmith-common .PHONY: install-pip install-pip: @@ -18,3 +23,36 @@ install: install-pip install-poetry install-packages .PHONY: lint lint: poetry run pre-commit run -a + +.PHONY: docker-up +docker-up: + docker compose up --force-recreate --remove-orphans -d + docker compose ps + +.PHONY: docker-down +docker-down: + docker compose stop + +.PHONY: test +test: + poetry run pytest $(opts) + +.PHONY: typecheck +typecheck: + poetry run mypy . + +.PHONY: django-make-migrations +django-make-migrations: + poetry run python manage.py waitfordb + poetry run python manage.py makemigrations $(opts) + +.PHONY: django-squash-migrations +django-squash-migrations: + poetry run python manage.py waitfordb + poetry run python manage.py squashmigrations $(opts) + +.PHONY: django-migrate +django-migrate: + poetry run python manage.py waitfordb + poetry run python manage.py migrate + poetry run python manage.py createcachetable diff --git a/docker/docker-compose.local.yml b/docker/docker-compose.local.yml new file mode 100644 index 0000000..deeaae2 --- /dev/null +++ b/docker/docker-compose.local.yml @@ -0,0 +1,19 @@ +# A Compose file with minimal dependencies to be able to run Flagsmith, including its test suite, locally (not in Docker). + +name: flagsmith + +volumes: + pg_data: + +services: + db: + image: postgres:15.5-alpine + pull_policy: always + restart: unless-stopped + volumes: + - pg_data:/var/lib/postgresql/data + ports: + - 5432:5432 + environment: + POSTGRES_DB: flagsmith + POSTGRES_PASSWORD: password diff --git a/poetry.lock b/poetry.lock index 94d6007..f157d0f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -27,6 +27,18 @@ files = [ [package.extras] tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +groups = ["main"] +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + [[package]] name = "certifi" version = "2025.1.31" @@ -240,7 +252,7 @@ files = [ ] [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "distlib" @@ -254,6 +266,22 @@ files = [ {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, ] +[[package]] +name = "dj-database-url" +version = "2.3.0" +description = "Use Database URLs in your Django Application." 
+optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "dj_database_url-2.3.0-py3-none-any.whl", hash = "sha256:bb0d414ba0ac5cd62773ec7f86f8cc378a9dbb00a80884c2fc08cc570452521e"}, + {file = "dj_database_url-2.3.0.tar.gz", hash = "sha256:ae52e8e634186b57e5a45e445da5dc407a819c2ceed8a53d1fac004cc5288787"}, +] + +[package.dependencies] +Django = ">=4.2" +typing_extensions = ">=3.10.0.0" + [[package]] name = "django" version = "4.2.16" @@ -400,6 +428,31 @@ files = [ {file = "drf_writable_nested-0.7.0-py3-none-any.whl", hash = "sha256:154c0381e8a3a477e0fd539d5e1caf8ff4c1097a9c0c0fe741d4858b11b0455b"}, ] +[[package]] +name = "drf-yasg" +version = "1.21.10" +description = "Automated generation of real Swagger/OpenAPI 2.0 schemas from Django Rest Framework code." +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "drf-yasg-1.21.10.tar.gz", hash = "sha256:f86d50faee3c31fcec4545985a871f832366c7fb5b77b62c48089d56ecf4f8d4"}, + {file = "drf_yasg-1.21.10-py3-none-any.whl", hash = "sha256:4d832e108dfe38e365101c36123576b498487d33bf27d57d6a37efb4cc773438"}, +] + +[package.dependencies] +django = ">=2.2.16" +djangorestframework = ">=3.10.3" +inflection = ">=0.3.1" +packaging = ">=21.0" +pytz = ">=2021.1" +pyyaml = ">=5.1" +uritemplate = ">=3.0.0" + +[package.extras] +coreapi = ["coreapi (>=2.3.3)", "coreschema (>=0.0.4)"] +validation = ["swagger-spec-validator (>=2.1.0)"] + [[package]] name = "environs" version = "14.1.1" @@ -419,7 +472,7 @@ python-dotenv = "*" [package.extras] dev = ["environs[tests]", "pre-commit (>=4.0,<5.0)", "tox"] django = ["dj-database-url", "dj-email-url", "django-cache-url"] -tests = ["backports.strenum", "environs[django]", "packaging", "pytest"] +tests = ["backports.strenum ; python_version < \"3.11\"", "environs[django]", "packaging", "pytest"] [[package]] name = "filelock" @@ -436,7 +489,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "flagsmith-flag-engine" @@ -521,6 +574,18 @@ files = [ [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] +[[package]] +name = "inflection" +version = "0.5.1" +description = "A port of Ruby on Rails inflector to Python" +optional = false +python-versions = ">=3.5" +groups = ["main"] +files = [ + {file = "inflection-0.5.1-py2.py3-none-any.whl", hash = "sha256:f38b2b640938a4f35ade69ac3d053042959b62a0f1076a5bbaa1b9526605a8a2"}, + {file = "inflection-0.5.1.tar.gz", hash = "sha256:1a29730d366e996aaacffb2f1f1cb9593dc38e2ddd30c91250c6dde09ea9b417"}, +] + [[package]] name = "iniconfig" version = "2.0.0" @@ -709,6 +774,26 @@ files = [ [package.extras] twisted = ["twisted"] +[[package]] +name = "psycopg2" +version = "2.9.10" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, + {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, + 
{file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, + {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, + {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, + {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"}, + {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, + {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, + {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, +] + [[package]] name = "pydantic" version = "2.9.2" @@ -731,7 +816,7 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and sys_platform == \"win32\""] [[package]] name = "pydantic-collections" @@ -1005,13 +1090,25 @@ files = [ [package.extras] cli = ["click (>=5.0)"] +[[package]] +name = "pytz" +version = "2025.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, + {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, +] + [[package]] name = "pyyaml" version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1143,13 +1240,133 @@ files = [ ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"] -core = ["importlib_metadata (>=6)", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""] +core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] -test = ["build[virtualenv] (>=1.0.3)", "filelock 
(>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] -type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] + +[[package]] +name = "simplejson" +version = "3.20.1" +description = "Simple, fast, extensible JSON encoder/decoder for Python" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.5" +groups = ["main"] +files = [ + {file = "simplejson-3.20.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f5272b5866b259fe6c33c4a8c5073bf8b359c3c97b70c298a2f09a69b52c7c41"}, + {file = "simplejson-3.20.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5c0de368f3052a59a1acf21f8b2dd28686a9e4eba2da7efae7ed9554cb31e7bc"}, + {file = "simplejson-3.20.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0821871404a537fd0e22eba240c74c0467c28af6cc435903eca394cfc74a0497"}, + {file = "simplejson-3.20.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:c939a1e576bded47d7d03aa2afc2ae90b928b2cf1d9dc2070ceec51fd463f430"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:3c4f0a61cdc05550782ca4a2cdb311ea196c2e6be6b24a09bf71360ca8c3ca9b"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:6c21f5c026ca633cfffcb6bc1fac2e99f65cb2b24657d3bef21aed9916cc3bbf"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:8d23b7f8d6b72319d6d55a0261089ff621ce87e54731c2d3de6a9bf7be5c028c"}, + {file = "simplejson-3.20.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:cda5c32a98f392909088111ecec23f2b0d39346ceae1a0fea23ab2d1f84ec21d"}, + {file = "simplejson-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e580aa65d5f6c3bf41b9b4afe74be5d5ddba9576701c107c772d936ea2b5043a"}, + {file = "simplejson-3.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a586ce4f78cec11f22fe55c5bee0f067e803aab9bad3441afe2181693b5ebb5"}, + {file = "simplejson-3.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74a1608f9e6e8c27a4008d70a54270868306d80ed48c9df7872f9f4b8ac87808"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03db8cb64154189a92a7786209f24e391644f3a3fa335658be2df2af1960b8d8"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eea7e2b7d858f6fdfbf0fe3cb846d6bd8a45446865bc09960e51f3d473c2271b"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e66712b17d8425bb7ff8968d4c7c7fd5a2dd7bd63728b28356223c000dd2f91f"}, + {file = "simplejson-3.20.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2cc4f6486f9f515b62f5831ff1888886619b84fc837de68f26d919ba7bbdcbc"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3c2df555ee4016148fa192e2b9cd9e60bc1d40769366134882685e90aee2a1e"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:78520f04b7548a5e476b5396c0847e066f1e0a4c0c5e920da1ad65e95f410b11"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f4bd49ecde87b0fe9f55cc971449a32832bca9910821f7072bbfae1155eaa007"}, + {file = "simplejson-3.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7eaae2b88eb5da53caaffdfa50e2e12022553949b88c0df4f9a9663609373f72"}, + {file = "simplejson-3.20.1-cp310-cp310-win32.whl", hash = "sha256:e836fb88902799eac8debc2b642300748f4860a197fa3d9ea502112b6bb8e142"}, + {file = "simplejson-3.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a19b552b212fc3b5b96fc5ce92333d4a9ac0a800803e1f17ebb16dac4be5"}, + {file = "simplejson-3.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:325b8c107253d3217e89d7b50c71015b5b31e2433e6c5bf38967b2f80630a8ca"}, + {file = "simplejson-3.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88a7baa8211089b9e58d78fbc1b0b322103f3f3d459ff16f03a36cece0d0fcf0"}, + {file = "simplejson-3.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:299b1007b8101d50d95bc0db1bf5c38dc372e85b504cf77f596462083ee77e3f"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ec618ed65caab48e81e3ed29586236a8e57daef792f1f3bb59504a7e98cd10"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2cdead1d3197f0ff43373cf4730213420523ba48697743e135e26f3d179f38"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3466d2839fdc83e1af42e07b90bc8ff361c4e8796cd66722a40ba14e458faddd"}, + {file = "simplejson-3.20.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d492ed8e92f3a9f9be829205f44b1d0a89af6582f0cf43e0d129fa477b93fe0c"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f924b485537b640dc69434565463fd6fc0c68c65a8c6e01a823dd26c9983cf79"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e8eacf6a3491bf76ea91a8d46726368a6be0eb94993f60b8583550baae9439e"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d34d04bf90b4cea7c22d8b19091633908f14a096caa301b24c2f3d85b5068fb8"}, + {file = "simplejson-3.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69dd28d4ce38390ea4aaf212902712c0fd1093dc4c1ff67e09687c3c3e15a749"}, + {file = "simplejson-3.20.1-cp311-cp311-win32.whl", hash = "sha256:dfe7a9da5fd2a3499436cd350f31539e0a6ded5da6b5b3d422df016444d65e43"}, + {file = "simplejson-3.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:896a6c04d7861d507d800da7642479c3547060bf97419d9ef73d98ced8258766"}, + {file = "simplejson-3.20.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f31c4a3a7ab18467ee73a27f3e59158255d1520f3aad74315edde7a940f1be23"}, + {file = "simplejson-3.20.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:884e6183d16b725e113b83a6fc0230152ab6627d4d36cb05c89c2c5bccfa7bc6"}, + {file = "simplejson-3.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d7a426e416fe0d3337115f04164cd9427eb4256e843a6b8751cacf70abc832"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000602141d0bddfcff60ea6a6e97d5e10c9db6b17fd2d6c66199fa481b6214bb"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af8377a8af78226e82e3a4349efdde59ffa421ae88be67e18cef915e4023a595"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c7de4c88ab2fbcb8781a3b982ef883696736134e20b1210bca43fb42ff1acf"}, + {file = "simplejson-3.20.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:455a882ff3f97d810709f7b620007d4e0aca8da71d06fc5c18ba11daf1c4df49"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fc0f523ce923e7f38eb67804bc80e0a028c76d7868500aa3f59225574b5d0453"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76461ec929282dde4a08061071a47281ad939d0202dc4e63cdd135844e162fbc"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19c2da8c043607bde4d4ef3a6b633e668a7d2e3d56f40a476a74c5ea71949f"}, + {file = "simplejson-3.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2578bedaedf6294415197b267d4ef678fea336dd78ee2a6d2f4b028e9d07be3"}, + {file = "simplejson-3.20.1-cp312-cp312-win32.whl", hash = "sha256:339f407373325a36b7fd744b688ba5bae0666b5d340ec6d98aebc3014bf3d8ea"}, + {file = "simplejson-3.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:627d4486a1ea7edf1f66bb044ace1ce6b4c1698acd1b05353c97ba4864ea2e17"}, + {file = "simplejson-3.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:71e849e7ceb2178344998cbe5ade101f1b329460243c79c27fbfc51c0447a7c3"}, + {file = "simplejson-3.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b63fdbab29dc3868d6f009a59797cefaba315fd43cd32ddd998ee1da28e50e29"}, + {file = "simplejson-3.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1190f9a3ce644fd50ec277ac4a98c0517f532cfebdcc4bd975c0979a9f05e1fb"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1336ba7bcb722ad487cd265701ff0583c0bb6de638364ca947bb84ecc0015d1"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e975aac6a5acd8b510eba58d5591e10a03e3d16c1cf8a8624ca177491f7230f0"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a6dd11ee282937ad749da6f3b8d87952ad585b26e5edfa10da3ae2536c73078"}, + {file = "simplejson-3.20.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab980fcc446ab87ea0879edad41a5c28f2d86020014eb035cf5161e8de4474c6"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f5aee2a4cb6b146bd17333ac623610f069f34e8f31d2f4f0c1a2186e50c594f0"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:652d8eecbb9a3b6461b21ec7cf11fd0acbab144e45e600c817ecf18e4580b99e"}, + {file = "simplejson-3.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c09948f1a486a89251ee3a67c9f8c969b379f6ffff1a6064b41fea3bce0a112"}, + {file = 
"simplejson-3.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cbbd7b215ad4fc6f058b5dd4c26ee5c59f72e031dfda3ac183d7968a99e4ca3a"}, + {file = "simplejson-3.20.1-cp313-cp313-win32.whl", hash = "sha256:ae81e482476eaa088ef9d0120ae5345de924f23962c0c1e20abbdff597631f87"}, + {file = "simplejson-3.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:1b9fd15853b90aec3b1739f4471efbf1ac05066a2c7041bf8db821bb73cd2ddc"}, + {file = "simplejson-3.20.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c7edf279c1376f28bf41e916c015a2a08896597869d57d621f55b6a30c7e1e6d"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9202b9de38f12e99a40addd1a8d508a13c77f46d87ab1f9095f154667f4fe81"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:391345b4157cc4e120027e013bd35c45e2c191e2bf48b8913af488cdc3b9243c"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6fdcc9debb711ddd2ad6d69f9386a3d9e8e253234bbb30513e0a7caa9510c51"}, + {file = "simplejson-3.20.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9daf8cdc7ee8a9e9f7a3b313ba0a003391857e90d0e82fbcd4d614aa05cb7c3b"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:c02f4868a3a46ffe284a51a88d134dc96feff6079a7115164885331a1ba8ed9f"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:3d7310172d5340febd258cb147f46aae30ad57c445f4d7e1ae8461c10aaf43b0"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:4762e05577955312a4c6802f58dd02e040cc79ae59cda510aa1564d84449c102"}, + {file = "simplejson-3.20.1-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:8bb98fdf318c05aefd08a92583bd6ee148e93c6756fb1befb7b2d5f27824be78"}, + {file = "simplejson-3.20.1-cp36-cp36m-win32.whl", hash = "sha256:9a74e70818818981294b8e6956ce3496c5e1bd4726ac864fae473197671f7b85"}, + {file = "simplejson-3.20.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e041add470e8f8535cc05509485eb7205729a84441f03b25cde80ad48823792e"}, + {file = "simplejson-3.20.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7e9d73f46119240e4f4f07868241749d67d09873f40cb968d639aa9ccc488b86"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae6e637dc24f8fee332ed23dd070e81394138e42cd4fd9d0923e5045ba122e27"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:efd3bc6c6b17e3d4620eb6be5196f0d1c08b6ce7c3101fa8e292b79e0908944b"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87fc623d457173a0213bc9ca4e346b83c9d443f63ed5cca847fb0cacea3cfc95"}, + {file = "simplejson-3.20.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec6a1e0a7aff76f0e008bebfa950188b9c50b58c1885d898145f48fc8e189a56"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:9c079606f461a6e950099167e21e13985147c8a24be8eea66c9ad68f73fad744"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:9faceb68fba27ef17eda306e4cd97a7b4b14fdadca5fbb15790ba8b26ebeec0c"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = 
"sha256:7ceed598e4bacbf5133fe7a418f7991bb2df0683f3ac11fbf9e36a2bc7aa4b85"}, + {file = "simplejson-3.20.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ede69c765e9901861ad7c6139023b7b7d5807c48a2539d817b4ab40018002d5f"}, + {file = "simplejson-3.20.1-cp37-cp37m-win32.whl", hash = "sha256:d8853c269a4c5146ddca4aa7c70e631795e9d11239d5fedb1c6bbc91ffdebcac"}, + {file = "simplejson-3.20.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ed6a17fd397f0e2b3ad668fc9e19253ed2e3875ad9086bd7f795c29a3223f4a1"}, + {file = "simplejson-3.20.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7551682b60bba3a9e2780742e101cf0a64250e76de7d09b1c4b0c8a7c7cc6834"}, + {file = "simplejson-3.20.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bd9577ec1c8c3a43040e3787711e4c257c70035b7551a21854b5dec88dad09e1"}, + {file = "simplejson-3.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8e197e4cf6d42c2c57e7c52cd7c1e7b3e37c5911df1314fb393320131e2101"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bd09c8c75666e7f62a33d2f1fb57f81da1fcbb19a9fe7d7910b5756e1dd6048"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bd6bfe5678d73fbd5328eea6a35216503796428fc47f1237432522febaf3a0c"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71b75d448fd0ceb2e7c90e72bb82c41f8462550d48529980bc0bab1d2495bfbb"}, + {file = "simplejson-3.20.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7e15b716d09f318c8cda3e20f82fae81684ce3d3acd1d7770fa3007df1769de"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3e7963197d958fcf9e98b212b80977d56c022384621ff463d98afc3b6b1ce7e8"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2e671dd62051129185d3a9a92c60101f56cbc174854a1a3dfb69114ebd9e1699"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e25b2a0c396f3b84fb89573d07b0e1846ed563eb364f2ea8230ca92b8a8cb786"}, + {file = "simplejson-3.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:489c3a43116082bad56795215786313832ba3991cca1f55838e52a553f451ab6"}, + {file = "simplejson-3.20.1-cp38-cp38-win32.whl", hash = "sha256:4a92e948bad8df7fa900ba2ba0667a98303f3db206cbaac574935c332838208e"}, + {file = "simplejson-3.20.1-cp38-cp38-win_amd64.whl", hash = "sha256:49d059b8363327eee3c94799dd96782314b2dbd7bcc293b4ad48db69d6f4d362"}, + {file = "simplejson-3.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a8011f1dd1d676befcd4d675ebdbfdbbefd3bf350052b956ba8c699fca7d8cef"}, + {file = "simplejson-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e91703a4c5fec53e36875ae426ad785f4120bd1d93b65bed4752eeccd1789e0c"}, + {file = "simplejson-3.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e39eaa57c7757daa25bcd21f976c46be443b73dd6c3da47fe5ce7b7048ccefe2"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceab2ce2acdc7fbaa433a93006758db6ba9a659e80c4faa13b80b9d2318e9b17"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d4f320c33277a5b715db5bf5b10dae10c19076bd6d66c2843e04bd12d1f1ea5"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:2b6436c48e64378fa844d8c9e58a5ed0352bbcfd4028369a9b46679b7ab79d2d"}, + {file = "simplejson-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e18345c8dda5d699be8166b61f9d80aaee4545b709f1363f60813dc032dac53"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:90b573693d1526bed576f6817e2a492eaaef68f088b57d7a9e83d122bbb49e51"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:272cc767826e924a6bd369ea3dbf18e166ded29059c7a4d64d21a9a22424b5b5"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:51b41f284d603c4380732d7d619f8b34bd04bc4aa0ed0ed5f4ffd0539b14da44"}, + {file = "simplejson-3.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6e6697a3067d281f01de0fe96fc7cba4ea870d96d7deb7bfcf85186d74456503"}, + {file = "simplejson-3.20.1-cp39-cp39-win32.whl", hash = "sha256:6dd3a1d5aca87bf947f3339b0f8e8e329f1badf548bdbff37fac63c17936da8e"}, + {file = "simplejson-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:463f1fca8fbf23d088e5850fdd0dd4d5faea8900a9f9680270bd98fd649814ca"}, + {file = "simplejson-3.20.1-py3-none-any.whl", hash = "sha256:8a6c1bbac39fa4a79f83cbf1df6ccd8ff7069582a9fd8db1e52cea073bc2c697"}, + {file = "simplejson-3.20.1.tar.gz", hash = "sha256:e64139b4ec4f1f24c142ff7dcafe55a22b811a74d86d66560c8815687143037d"}, +] [[package]] name = "six" @@ -1206,6 +1423,18 @@ files = [ [package.dependencies] urllib3 = ">=2" +[[package]] +name = "types-simplejson" +version = "3.20.0.20250326" +description = "Typing stubs for simplejson" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_simplejson-3.20.0.20250326-py3-none-any.whl", hash = "sha256:db1ddea7b8f7623b27a137578f22fc6c618db8c83ccfb1828ca0d2f0ec11efa7"}, + {file = "types_simplejson-3.20.0.20250326.tar.gz", hash = "sha256:b2689bc91e0e672d7a5a947b4cb546b76ae7ddc2899c6678e72a10bf96cd97d2"}, +] + [[package]] name = "typing-extensions" version = "4.12.2" @@ -1231,6 +1460,18 @@ files = [ {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] +[[package]] +name = "uritemplate" +version = "4.1.1" +description = "Implementation of RFC 6570 URI Templates" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "uritemplate-4.1.1-py2.py3-none-any.whl", hash = "sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e"}, + {file = "uritemplate-4.1.1.tar.gz", hash = "sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0"}, +] + [[package]] name = "urllib3" version = "2.3.0" @@ -1244,7 +1485,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -1268,9 +1509,9 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", 
"setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] [metadata] lock-version = "2.1" python-versions = ">=3.11,<4.0" -content-hash = "3b6e4cd52d0ed8f03bcfad0e1299404e30f032db1792b09a65850048603e3560" +content-hash = "3005f742451d90ecf950ebfbae482b06ea44a5ea062823722017d07ed4ae7959" diff --git a/pyproject.toml b/pyproject.toml index 4838d9c..84aaa01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,15 +4,19 @@ version = "1.5.2" description = "Flagsmith's common library" requires-python = ">=3.11,<4.0" dependencies = [ - "django (<5)", + "backoff (>=2.2.1,<3.0.0)", + "django (>4,<5)", "django-health-check", "djangorestframework-recursive", "djangorestframework", "drf-writable-nested", + "drf-yasg (>=1.21.10,<2.0.0)", + "environs (<15)", "flagsmith-flag-engine", "gunicorn (>=19.1)", "prometheus-client (>=0.0.16)", - "environs (<15)", + "psycopg2 (>=2,<3)", + "simplejson (>=3,<4)", ] authors = [ { name = "Matthew Elwell" }, @@ -48,9 +52,13 @@ classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", ] -packages = [{ include = "common", from = "src" }] +packages = [ + { include = "common", from = "src" }, + { include = "task_processor", from = "src" }, +] [tool.poetry.group.dev.dependencies] +dj-database-url = "^2.3.0" django-stubs = "^5.1.3" djangorestframework-stubs = "^3.15.3" mypy = "^1.15.0" @@ -65,6 +73,7 @@ pytest-mock = "^3.14.0" requests = "^2.32.3" ruff = "*" setuptools = "^77.0.3" +types-simplejson = "^3.20.0.20250326" [build-system] requires = ["poetry-core"] diff --git a/settings/dev.py b/settings/dev.py index 3050782..e9a5a64 100644 --- a/settings/dev.py +++ b/settings/dev.py @@ -1,4 +1,12 @@ +from datetime import time, timedelta + +import dj_database_url import prometheus_client +from environs import Env + +from task_processor.task_run_method import TaskRunMethod + +env = Env() # Settings expected by `mypy_django_plugin` AWS_SES_REGION_ENDPOINT: str @@ -8,15 +16,18 @@ # Settings required for tests DATABASES = { - "default": { - "ENGINE": "django.db.backends.sqlite3", - "NAME": "common.sqlite3", - } + "default": dj_database_url.parse( + env( + "DATABASE_URL", + default="postgresql://postgres:password@localhost:5432/flagsmith", + ) + ) } INSTALLED_APPS = [ "django.contrib.auth", "django.contrib.contenttypes", "common.core", + "task_processor", ] MIDDLEWARE = [ "common.gunicorn.middleware.RouteLoggerMiddleware", @@ -26,3 +37,13 @@ ROOT_URLCONF = "common.core.urls" TIME_ZONE = "UTC" USE_TZ = True + +ENABLE_CLEAN_UP_OLD_TASKS = True +ENABLE_TASK_PROCESSOR_HEALTH_CHECK = True +RECURRING_TASK_RUN_RETENTION_DAYS = 15 +TASK_DELETE_BATCH_SIZE = 2000 +TASK_DELETE_INCLUDE_FAILED_TASKS = False +TASK_DELETE_RETENTION_DAYS = 15 +TASK_DELETE_RUN_EVERY = timedelta(days=1) +TASK_DELETE_RUN_TIME = time(5, 0, 0) +TASK_RUN_METHOD = TaskRunMethod.TASK_PROCESSOR diff --git a/src/common/core/management/commands/start.py b/src/common/core/management/commands/start.py index ae41564..d7eed62 100644 --- a/src/common/core/management/commands/start.py +++ 
b/src/common/core/management/commands/start.py @@ -3,7 +3,10 @@ from django.core.management import BaseCommand, CommandParser from django.utils.module_loading import autodiscover_modules -from common.gunicorn.utils import add_arguments, run_server +from common.gunicorn.utils import add_arguments as add_gunicorn_arguments +from common.gunicorn.utils import run_server +from task_processor.utils import add_arguments as add_task_processor_arguments +from task_processor.utils import start_task_processor class Command(BaseCommand): @@ -13,20 +16,31 @@ def create_parser(self, *args: Any, **kwargs: Any) -> CommandParser: return super().create_parser(*args, conflict_handler="resolve", **kwargs) def add_arguments(self, parser: CommandParser) -> None: - add_arguments(parser) + add_gunicorn_arguments(parser) subparsers = parser.add_subparsers( title="sub-commands", required=True, ) + api_parser = subparsers.add_parser( "api", help="Start the Core API.", ) api_parser.set_defaults(handle_method=self.handle_api) + task_processor_parser = subparsers.add_parser( + "task-processor", + help="Start the Task Processor.", + ) + task_processor_parser.set_defaults(handle_method=self.handle_task_processor) + add_task_processor_arguments(task_processor_parser) + def initialise(self) -> None: - autodiscover_modules("metrics") + autodiscover_modules( + "metrics", + "tasks", + ) def handle( self, @@ -39,3 +53,9 @@ def handle( def handle_api(self, *args: Any, **options: Any) -> None: run_server(options) + + def handle_task_processor(self, *args: Any, **options: Any) -> None: + with start_task_processor(options): + # Delegate signal handling to Gunicorn. + # The task processor will finalise once Gunicorn is shut down. + run_server(options) diff --git a/src/common/core/management/commands/waitfordb.py b/src/common/core/management/commands/waitfordb.py new file mode 100644 index 0000000..91c3746 --- /dev/null +++ b/src/common/core/management/commands/waitfordb.py @@ -0,0 +1,87 @@ +import logging +import time +from argparse import ArgumentParser +from typing import Any + +from django.core.management import BaseCommand +from django.db import OperationalError, connections +from django.db.migrations.executor import MigrationExecutor + +logger = logging.getLogger(__name__) + + +class Command(BaseCommand): + def add_arguments(self, parser: ArgumentParser) -> None: + parser.add_argument( + "--waitfor", + type=int, + dest="wait_for", + help="Number of seconds to wait for db to become available.", + default=5, + ) + parser.add_argument( + "--migrations", + action="store_true", + dest="should_wait_for_migrations", + help="Whether to wait until all migrations are applied.", + default=False, + ) + parser.add_argument( + "--database", + type=str, + dest="database", + help=( + 'The database to wait for ("default", "analytics").' + 'Defaults to the "default" database.' + ), + default="default", + ) + + def handle( + self, + *args: Any, + wait_for: int, + should_wait_for_migrations: bool, + database: str, + **options: Any, + ) -> None: + start = time.monotonic() + wait_between_checks = 0.25 + + logger.info("Checking if database is ready for connections.") + + while True: + if time.monotonic() - start > wait_for: + msg = f"Failed to connect to DB within {wait_for} seconds." 
+ logger.error(msg) + exit(msg) + + conn = connections.create_connection(database) + try: + with conn.temporary_connection() as cursor: + cursor.execute("SELECT 1") + logger.info("Successfully connected to the database.") + break + except OperationalError as e: + logger.warning("Database not yet ready for connections.") + logger.warning("Error was: %s: %s", e.__class__.__name__, e) + + time.sleep(wait_between_checks) + + if should_wait_for_migrations: + logger.info("Checking for applied migrations.") + + while True: + if time.monotonic() - start > wait_for: + msg = f"Didn't detect applied migrations for {wait_for} seconds." + logger.error(msg) + exit(msg) + + conn = connections[database] + executor = MigrationExecutor(conn) + if not executor.migration_plan(executor.loader.graph.leaf_nodes()): + logger.info("No pending migrations detected, good to go.") + return + + logger.warning("Migrations not yet applied.") + time.sleep(wait_between_checks) diff --git a/src/task_processor/__init__.py b/src/task_processor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/task_processor/admin.py b/src/task_processor/admin.py new file mode 100644 index 0000000..6284a76 --- /dev/null +++ b/src/task_processor/admin.py @@ -0,0 +1,38 @@ +from datetime import datetime + +from django.contrib import admin +from django.db.models import QuerySet +from django.http import HttpRequest + +from task_processor.models import RecurringTask + + +@admin.register(RecurringTask) +class RecurringTaskAdmin(admin.ModelAdmin[RecurringTask]): + list_display = ( + "uuid", + "task_identifier", + "run_every", + "last_run_status", + "last_run_finished_at", + "is_locked", + ) + readonly_fields = ("args", "kwargs") + + def last_run_status(self, instance: RecurringTask) -> str | None: + if last_run := instance.task_runs.order_by("-started_at").first(): + return last_run.result + return None + + def last_run_finished_at(self, instance: RecurringTask) -> datetime | None: + if last_run := instance.task_runs.order_by("-started_at").first(): + return last_run.finished_at + return None + + @admin.action(description="Unlock selected tasks") + def unlock( + self, + request: HttpRequest, + queryset: QuerySet[RecurringTask], + ) -> None: + queryset.update(is_locked=False) diff --git a/src/task_processor/apps.py b/src/task_processor/apps.py new file mode 100644 index 0000000..6e76fc1 --- /dev/null +++ b/src/task_processor/apps.py @@ -0,0 +1,18 @@ +from django.apps import AppConfig +from django.conf import settings +from health_check.plugins import plugin_dir # type: ignore[import-untyped] + +from task_processor.task_run_method import TaskRunMethod + + +class TaskProcessorAppConfig(AppConfig): + name = "task_processor" + + def ready(self) -> None: + if ( + settings.ENABLE_TASK_PROCESSOR_HEALTH_CHECK + and settings.TASK_RUN_METHOD == TaskRunMethod.TASK_PROCESSOR + ): + from .health import TaskProcessorHealthCheckBackend + + plugin_dir.register(TaskProcessorHealthCheckBackend) diff --git a/src/task_processor/decorators.py b/src/task_processor/decorators.py new file mode 100644 index 0000000..8e865b9 --- /dev/null +++ b/src/task_processor/decorators.py @@ -0,0 +1,206 @@ +import logging +import os +import typing +from datetime import datetime, time, timedelta +from threading import Thread + +from django.conf import settings +from django.db.transaction import on_commit +from django.utils import timezone + +from task_processor import task_registry +from task_processor.exceptions import InvalidArgumentsError, TaskQueueFullError +from 
task_processor.models import RecurringTask, Task, TaskPriority +from task_processor.task_run_method import TaskRunMethod +from task_processor.types import TaskCallable, TaskParameters +from task_processor.utils import get_task_identifier_from_function + +logger = logging.getLogger(__name__) + + +class TaskHandler(typing.Generic[TaskParameters]): + __slots__ = ( + "unwrapped", + "queue_size", + "priority", + "transaction_on_commit", + "task_identifier", + "timeout", + ) + + unwrapped: TaskCallable[TaskParameters] + + def __init__( + self, + f: TaskCallable[TaskParameters], + *, + task_name: str | None = None, + queue_size: int | None = None, + priority: TaskPriority = TaskPriority.NORMAL, + transaction_on_commit: bool = True, + timeout: timedelta | None = None, + ) -> None: + self.unwrapped = f + self.queue_size = queue_size + self.priority = priority + self.transaction_on_commit = transaction_on_commit + self.timeout = timeout + + self.task_identifier = task_identifier = get_task_identifier_from_function( + f, + task_name, + ) + task_registry.register_task(task_identifier, f) + + def __call__( + self, + *args: TaskParameters.args, + **kwargs: TaskParameters.kwargs, + ) -> None: + _validate_inputs(*args, **kwargs) + return self.unwrapped(*args, **kwargs) + + def delay( + self, + *, + delay_until: datetime | None = None, + # TODO @khvn26 consider typing `args` and `kwargs` with `ParamSpec` + # (will require a change to the signature) + args: tuple[typing.Any, ...] = (), + kwargs: dict[str, typing.Any] | None = None, + ) -> Task | None: + logger.debug("Request to run task '%s' asynchronously.", self.task_identifier) + + kwargs = kwargs or {} + + if delay_until and settings.TASK_RUN_METHOD != TaskRunMethod.TASK_PROCESSOR: + # TODO: consider not having this silently fail? + logger.warning( + "Cannot schedule tasks to run in the future without task processor." + ) + return None + + if settings.TASK_RUN_METHOD == TaskRunMethod.SYNCHRONOUSLY: + _validate_inputs(*args, **kwargs) + self.unwrapped(*args, **kwargs) + elif settings.TASK_RUN_METHOD == TaskRunMethod.SEPARATE_THREAD: + logger.debug("Running task '%s' in separate thread", self.task_identifier) + self.run_in_thread(args=args, kwargs=kwargs) + else: + logger.debug("Creating task for function '%s'...", self.task_identifier) + try: + task = Task.create( + task_identifier=self.task_identifier, + scheduled_for=delay_until or timezone.now(), + priority=self.priority, + queue_size=self.queue_size, + timeout=self.timeout, + args=args, + kwargs=kwargs, + ) + except TaskQueueFullError as e: + logger.warning(e) + return None + + task.save() + return task + return None + + def run_in_thread( + self, + *, + args: tuple[typing.Any, ...] = (), + kwargs: dict[str, typing.Any] | None = None, + ) -> None: + kwargs = kwargs or {} + _validate_inputs(*args, **kwargs) + thread = Thread(target=self.unwrapped, args=args, kwargs=kwargs, daemon=True) + + def _start() -> None: + logger.info( + "Running function %s in unmanaged thread.", self.unwrapped.__name__ + ) + thread.start() + + if self.transaction_on_commit: + return on_commit(_start) + return _start() + + +def register_task_handler( # noqa: C901 + *, + task_name: str | None = None, + queue_size: int | None = None, + priority: TaskPriority = TaskPriority.NORMAL, + transaction_on_commit: bool = True, + timeout: timedelta | None = timedelta(seconds=60), +) -> typing.Callable[[TaskCallable[TaskParameters]], TaskHandler[TaskParameters]]: + """ + Turn a function into an asynchronous task. 
+ + :param str task_name: task name. Defaults to function name. + :param int queue_size: (`TASK_PROCESSOR` task run method only) + max queue size for the task. Task runs exceeding the max size get dropped by + the task processor. Defaults to `None` (infinite). + :param TaskPriority priority: task priority. + :param bool transaction_on_commit: (`SEPARATE_THREAD` task run method only) + Whether to wrap the task call in `transaction.on_commit`. Defaults to `True`. + We need this for the task to be able to access data committed with the current + transaction. If the task is invoked outside of a transaction, it will start + immediately. + Pass `False` if you want the task to start immediately regardless of the current + transaction. + :rtype: TaskHandler + """ + + def wrapper(f: TaskCallable[TaskParameters]) -> TaskHandler[TaskParameters]: + return TaskHandler( + f, + task_name=task_name, + queue_size=queue_size, + priority=priority, + transaction_on_commit=transaction_on_commit, + timeout=timeout, + ) + + return wrapper + + +def register_recurring_task( + run_every: timedelta, + task_name: str | None = None, + args: tuple[typing.Any, ...] = (), + kwargs: dict[str, typing.Any] | None = None, + first_run_time: time | None = None, + timeout: timedelta | None = timedelta(minutes=30), +) -> typing.Callable[[TaskCallable[TaskParameters]], TaskCallable[TaskParameters]]: + if not os.environ.get("RUN_BY_PROCESSOR"): + # Do not register recurring tasks if not invoked by task processor + return lambda f: f + + def decorator(f: TaskCallable[TaskParameters]) -> TaskCallable[TaskParameters]: + nonlocal task_name + + task_name = task_name or f.__name__ + task_identifier = get_task_identifier_from_function(f, task_name) + + task_kwargs = { + "serialized_args": RecurringTask.serialize_data(args or ()), + "serialized_kwargs": RecurringTask.serialize_data(kwargs or {}), + "run_every": run_every, + "first_run_time": first_run_time, + "timeout": timeout, + } + + task_registry.register_recurring_task(task_identifier, f, **task_kwargs) + return f + + return decorator + + +def _validate_inputs(*args: typing.Any, **kwargs: typing.Any) -> None: + try: + Task.serialize_data(args or ()) + Task.serialize_data(kwargs or {}) + except TypeError as e: + raise InvalidArgumentsError("Inputs are not serializable.") from e diff --git a/src/task_processor/exceptions.py b/src/task_processor/exceptions.py new file mode 100644 index 0000000..7f697a6 --- /dev/null +++ b/src/task_processor/exceptions.py @@ -0,0 +1,10 @@ +class TaskProcessingError(Exception): + pass + + +class InvalidArgumentsError(TaskProcessingError): + pass + + +class TaskQueueFullError(Exception): + pass diff --git a/src/task_processor/health.py b/src/task_processor/health.py new file mode 100644 index 0000000..ccd0427 --- /dev/null +++ b/src/task_processor/health.py @@ -0,0 +1,44 @@ +import uuid + +import backoff +from health_check.backends import BaseHealthCheckBackend # type: ignore[import-untyped] +from health_check.exceptions import HealthCheckException # type: ignore[import-untyped] + +from task_processor.models import HealthCheckModel +from task_processor.tasks import create_health_check_model + + +def is_processor_healthy(max_tries: int = 5, factor: float = 0.1) -> bool: + health_check_model_uuid = str(uuid.uuid4()) + + create_health_check_model.delay(args=(health_check_model_uuid,)) + + @backoff.on_predicate( + backoff.expo, + lambda m: m is None, + max_tries=max_tries, + factor=factor, + jitter=None, + ) + def get_health_check_model() -> HealthCheckModel |
None: + return HealthCheckModel.objects.filter(uuid=health_check_model_uuid).first() + + health_check_model = get_health_check_model() + if health_check_model: + health_check_model.delete() + return True + + return False + + +class TaskProcessorHealthCheckBackend(BaseHealthCheckBackend): # type: ignore[misc] + #: The status endpoints will respond with a 200 status code + #: even if the check errors. + critical_service = False + + def check_status(self) -> None: + if not is_processor_healthy(): + raise HealthCheckException("Task processor is unable to process tasks.") + + def identifier(self) -> str: + return self.__class__.__name__ # Display name on the endpoint. diff --git a/src/task_processor/managers.py b/src/task_processor/managers.py new file mode 100644 index 0000000..dd8ad4a --- /dev/null +++ b/src/task_processor/managers.py @@ -0,0 +1,18 @@ +import typing + +from django.db.models import Manager + +if typing.TYPE_CHECKING: + from django.db.models.query import RawQuerySet + + from task_processor.models import RecurringTask, Task + + +class TaskManager(Manager["Task"]): + def get_tasks_to_process(self, num_tasks: int) -> "RawQuerySet[Task]": + return self.raw("SELECT * FROM get_tasks_to_process(%s)", [num_tasks]) + + +class RecurringTaskManager(Manager["RecurringTask"]): + def get_tasks_to_process(self) -> "RawQuerySet[RecurringTask]": + return self.raw("SELECT * FROM get_recurringtasks_to_process()") diff --git a/src/task_processor/migrations/0001_initial.py b/src/task_processor/migrations/0001_initial.py new file mode 100644 index 0000000..83a3119 --- /dev/null +++ b/src/task_processor/migrations/0001_initial.py @@ -0,0 +1,44 @@ +# Generated by Django 3.2.14 on 2022-08-02 11:25 + +from django.db import migrations, models +import django.db.models.deletion +import django.utils.timezone +import uuid + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Task', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, unique=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('scheduled_for', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)), + ('task_identifier', models.CharField(max_length=200)), + ('serialized_args', models.TextField(blank=True, null=True)), + ('serialized_kwargs', models.TextField(blank=True, null=True)), + ('num_failures', models.IntegerField(default=0)), + ], + options={ + 'index_together': {('scheduled_for', 'num_failures')}, + }, + ), + migrations.CreateModel( + name='TaskRun', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('started_at', models.DateTimeField()), + ('finished_at', models.DateTimeField(blank=True, null=True)), + ('result', models.CharField(blank=True, choices=[('SUCCESS', 'Success'), ('FAILURE', 'Failure')], db_index=True, max_length=50, null=True)), + ('error_details', models.TextField(blank=True, null=True)), + ('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_runs', to='task_processor.task')), + ], + ), + ] diff --git a/src/task_processor/migrations/0002_healthcheckmodel.py b/src/task_processor/migrations/0002_healthcheckmodel.py new file mode 100644 index 0000000..f3938a5 --- /dev/null +++ b/src/task_processor/migrations/0002_healthcheckmodel.py @@ -0,0 +1,21 @@ +# Generated by Django 3.2.14 on 2022-08-12 
11:39 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('task_processor', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='HealthCheckModel', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('uuid', models.UUIDField(unique=True)), + ], + ), + ] diff --git a/src/task_processor/migrations/0003_add_completed_to_task.py b/src/task_processor/migrations/0003_add_completed_to_task.py new file mode 100644 index 0000000..38255d2 --- /dev/null +++ b/src/task_processor/migrations/0003_add_completed_to_task.py @@ -0,0 +1,22 @@ +# Generated by Django 3.2.15 on 2022-08-24 13:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('task_processor', '0002_healthcheckmodel'), + ] + + operations = [ + migrations.AddField( + model_name='task', + name='completed', + field=models.BooleanField(default=False), + ), + migrations.AlterIndexTogether( + name='task', + index_together={('scheduled_for', 'num_failures', 'completed')}, + ), + ] diff --git a/src/task_processor/migrations/0004_recreate_task_indexes.py b/src/task_processor/migrations/0004_recreate_task_indexes.py new file mode 100644 index 0000000..4919518 --- /dev/null +++ b/src/task_processor/migrations/0004_recreate_task_indexes.py @@ -0,0 +1,43 @@ +# Generated by Django 3.2.15 on 2022-10-07 09:53 + +from django.db import migrations, models + +from task_processor.migrations.helpers import PostgresOnlyRunSQL + + +class Migration(migrations.Migration): + + atomic = False + + dependencies = [ + ("task_processor", "0003_add_completed_to_task"), + ] + + operations = [ + migrations.SeparateDatabaseAndState( + state_operations=[ + migrations.AlterIndexTogether( + name="task", + index_together=set(), + ), + migrations.AddIndex( + model_name="task", + index=models.Index( + condition=models.Q(("completed", False)), + fields=["num_failures", "scheduled_for"], + name="incomplete_tasks_idx", + ), + ), + ], + database_operations=[ + PostgresOnlyRunSQL( + "DROP INDEX CONCURRENTLY task_processor_task_scheduled_for_num_failur_17d6dc77_idx;", + reverse_sql='CREATE INDEX "task_processor_task_scheduled_for_num_failur_17d6dc77_idx" ON "task_processor_task" ("scheduled_for", "num_failures", "completed");', + ), + PostgresOnlyRunSQL( + 'CREATE INDEX CONCURRENTLY "incomplete_tasks_idx" ON "task_processor_task" ("num_failures", "scheduled_for") WHERE NOT "completed";', + reverse_sql='DROP INDEX CONCURRENTLY "incomplete_tasks_idx";', + ), + ], + ), + ] diff --git a/src/task_processor/migrations/0005_update_conditional_index_conditions.py b/src/task_processor/migrations/0005_update_conditional_index_conditions.py new file mode 100644 index 0000000..2b569b7 --- /dev/null +++ b/src/task_processor/migrations/0005_update_conditional_index_conditions.py @@ -0,0 +1,45 @@ +# Generated by Django 3.2.15 on 2022-10-07 11:16 + +from django.db import migrations, models + +from task_processor.migrations.helpers import PostgresOnlyRunSQL + + +class Migration(migrations.Migration): + + atomic = False + + dependencies = [ + ("task_processor", "0004_recreate_task_indexes"), + ] + + operations = [ + migrations.SeparateDatabaseAndState( + state_operations=[ + migrations.RemoveIndex( + model_name="task", + name="incomplete_tasks_idx", + ), + migrations.AddIndex( + model_name="task", + index=models.Index( + condition=models.Q( + 
("completed", False), ("num_failures__lt", 3) + ), + fields=["scheduled_for"], + name="incomplete_tasks_idx", + ), + ), + ], + database_operations=[ + PostgresOnlyRunSQL( + 'DROP INDEX CONCURRENTLY "incomplete_tasks_idx";', + reverse_sql='CREATE INDEX CONCURRENTLY "incomplete_tasks_idx" ON "task_processor_task" ("num_failures", "scheduled_for") WHERE NOT "completed";', + ), + PostgresOnlyRunSQL( + 'CREATE INDEX CONCURRENTLY "incomplete_tasks_idx" ON "task_processor_task" ("scheduled_for") WHERE (NOT "completed" and "num_failures" < 3);', + reverse_sql='DROP INDEX CONCURRENTLY "incomplete_tasks_idx";', + ), + ], + ) + ] diff --git a/src/task_processor/migrations/0006_auto_20230221_0802.py b/src/task_processor/migrations/0006_auto_20230221_0802.py new file mode 100644 index 0000000..f453aab --- /dev/null +++ b/src/task_processor/migrations/0006_auto_20230221_0802.py @@ -0,0 +1,45 @@ +# Generated by Django 3.2.16 on 2023-02-21 08:02 + +from django.db import migrations, models +import django.db.models.deletion +import uuid + + +class Migration(migrations.Migration): + + dependencies = [ + ('task_processor', '0005_update_conditional_index_conditions'), + ] + + operations = [ + migrations.CreateModel( + name='RecurringTask', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('uuid', models.UUIDField(default=uuid.uuid4, unique=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('task_identifier', models.CharField(max_length=200)), + ('serialized_args', models.TextField(blank=True, null=True)), + ('serialized_kwargs', models.TextField(blank=True, null=True)), + ('run_every', models.DurationField()), + ], + ), + migrations.CreateModel( + name='RecurringTaskRun', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('started_at', models.DateTimeField()), + ('finished_at', models.DateTimeField(blank=True, null=True)), + ('result', models.CharField(blank=True, choices=[('SUCCESS', 'Success'), ('FAILURE', 'Failure')], db_index=True, max_length=50, null=True)), + ('error_details', models.TextField(blank=True, null=True)), + ('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='task_runs', to='task_processor.recurringtask')), + ], + options={ + 'abstract': False, + }, + ), + migrations.AddConstraint( + model_name='recurringtask', + constraint=models.UniqueConstraint(fields=('task_identifier', 'run_every'), name='unique_run_every_tasks'), + ), + ] diff --git a/src/task_processor/migrations/0007_add_is_locked.py b/src/task_processor/migrations/0007_add_is_locked.py new file mode 100644 index 0000000..71b21a3 --- /dev/null +++ b/src/task_processor/migrations/0007_add_is_locked.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.18 on 2023-04-20 02:43 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('task_processor', '0006_auto_20230221_0802'), + ] + + operations = [ + migrations.AddField( + model_name='recurringtask', + name='is_locked', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='task', + name='is_locked', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/task_processor/migrations/0008_add_get_task_to_process_function.py b/src/task_processor/migrations/0008_add_get_task_to_process_function.py new file mode 100644 index 0000000..21cca62 --- /dev/null +++ 
b/src/task_processor/migrations/0008_add_get_task_to_process_function.py @@ -0,0 +1,31 @@ +# Generated by Django 3.2.18 on 2023-04-20 02:45 + +from django.db import migrations + +from task_processor.migrations.helpers import PostgresOnlyRunSQL +import os + + +class Migration(migrations.Migration): + dependencies = [ + ("task_processor", "0007_add_is_locked"), + ] + + operations = [ + PostgresOnlyRunSQL.from_sql_file( + os.path.join( + os.path.dirname(__file__), + "sql", + "0008_get_tasks_to_process.sql", + ), + reverse_sql="DROP FUNCTION IF EXISTS get_tasks_to_process", + ), + PostgresOnlyRunSQL.from_sql_file( + os.path.join( + os.path.dirname(__file__), + "sql", + "0008_get_recurring_tasks_to_process.sql", + ), + reverse_sql="DROP FUNCTION IF EXISTS get_recurringtasks_to_process", + ), + ] diff --git a/src/task_processor/migrations/0009_add_recurring_task_run_first_run_at.py b/src/task_processor/migrations/0009_add_recurring_task_run_first_run_at.py new file mode 100644 index 0000000..c2d9147 --- /dev/null +++ b/src/task_processor/migrations/0009_add_recurring_task_run_first_run_at.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.18 on 2023-04-05 13:47 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('task_processor', '0008_add_get_task_to_process_function'), + ] + + operations = [ + migrations.AddField( + model_name='recurringtask', + name='first_run_time', + field=models.TimeField(blank=True, null=True), + ), + ] diff --git a/src/task_processor/migrations/0010_task_priority.py b/src/task_processor/migrations/0010_task_priority.py new file mode 100644 index 0000000..c1b4121 --- /dev/null +++ b/src/task_processor/migrations/0010_task_priority.py @@ -0,0 +1,27 @@ +# Generated by Django 3.2.20 on 2023-10-13 06:04 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("task_processor", "0009_add_recurring_task_run_first_run_at"), + ] + + operations = [ + migrations.AddField( + model_name="task", + name="priority", + field=models.SmallIntegerField( + choices=[ + (100, "Lower"), + (75, "Low"), + (50, "Normal"), + (25, "High"), + (0, "Highest"), + ], + default=None, + null=True, + ), + ), + ] diff --git a/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py b/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py new file mode 100644 index 0000000..2270d7f --- /dev/null +++ b/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py @@ -0,0 +1,27 @@ +# Generated by Django 3.2.20 on 2023-10-13 04:44 + +from django.db import migrations + +from task_processor.migrations.helpers import PostgresOnlyRunSQL +import os + + +class Migration(migrations.Migration): + dependencies = [ + ("task_processor", "0010_task_priority"), + ] + + operations = [ + PostgresOnlyRunSQL.from_sql_file( + os.path.join( + os.path.dirname(__file__), + "sql", + "0011_get_tasks_to_process.sql", + ), + reverse_sql=os.path.join( + os.path.dirname(__file__), + "sql", + "0008_get_tasks_to_process.sql", + ), + ), + ] diff --git a/src/task_processor/migrations/0012_add_locked_at_and_timeout.py b/src/task_processor/migrations/0012_add_locked_at_and_timeout.py new file mode 100644 index 0000000..866d40e --- /dev/null +++ b/src/task_processor/migrations/0012_add_locked_at_and_timeout.py @@ -0,0 +1,39 @@ +# Generated by Django 3.2.23 on 2025-01-06 04:51 + +from task_processor.migrations.helpers import PostgresOnlyRunSQL +import datetime +from django.db import 
migrations, models +import os + + +class Migration(migrations.Migration): + + dependencies = [ + ("task_processor", "0011_add_priority_to_get_tasks_to_process"), + ] + + operations = [ + migrations.AddField( + model_name="recurringtask", + name="locked_at", + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name="recurringtask", + name="timeout", + field=models.DurationField(default=datetime.timedelta(minutes=30)), + ), + migrations.AddField( + model_name="task", + name="timeout", + field=models.DurationField(blank=True, null=True), + ), + PostgresOnlyRunSQL.from_sql_file( + os.path.join( + os.path.dirname(__file__), + "sql", + "0012_get_recurringtasks_to_process.sql", + ), + reverse_sql="DROP FUNCTION IF EXISTS get_recurringtasks_to_process()", + ), + ] diff --git a/src/task_processor/migrations/__init__.py b/src/task_processor/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/task_processor/migrations/helpers/__init__.py b/src/task_processor/migrations/helpers/__init__.py new file mode 100644 index 0000000..4fb432d --- /dev/null +++ b/src/task_processor/migrations/helpers/__init__.py @@ -0,0 +1,9 @@ +""" +Note: django doesn't support adding submodules to the migrations module directory +that don't include a Migration class. As such, I've defined this helpers submodule +and simplified the imports by defining the __all__ attribute. +""" + +from task_processor.migrations.helpers.postgres_helpers import PostgresOnlyRunSQL + +__all__ = ["PostgresOnlyRunSQL"] diff --git a/src/task_processor/migrations/helpers/postgres_helpers.py b/src/task_processor/migrations/helpers/postgres_helpers.py new file mode 100644 index 0000000..f3de3aa --- /dev/null +++ b/src/task_processor/migrations/helpers/postgres_helpers.py @@ -0,0 +1,41 @@ +from contextlib import suppress + +from django.db import migrations +from django.db.backends.base.schema import BaseDatabaseSchemaEditor +from django.db.migrations.state import ProjectState + + +class PostgresOnlyRunSQL(migrations.RunSQL): + @classmethod + def from_sql_file( + cls, + file_path: str, + reverse_sql: str = "", + ) -> "PostgresOnlyRunSQL": + with open(file_path) as forward_sql: + with suppress(FileNotFoundError): + with open(reverse_sql) as reverse_sql_file: + reverse_sql = reverse_sql_file.read() + return cls(forward_sql.read(), reverse_sql=reverse_sql) + + def database_forwards( + self, + app_label: str, + schema_editor: BaseDatabaseSchemaEditor, + from_state: ProjectState, + to_state: ProjectState, + ) -> None: + if schema_editor.connection.vendor != "postgresql": + return + super().database_forwards(app_label, schema_editor, from_state, to_state) + + def database_backwards( + self, + app_label: str, + schema_editor: BaseDatabaseSchemaEditor, + from_state: ProjectState, + to_state: ProjectState, + ) -> None: + if schema_editor.connection.vendor != "postgresql": + return + super().database_backwards(app_label, schema_editor, from_state, to_state) diff --git a/src/task_processor/migrations/sql/0008_get_recurring_tasks_to_process.sql b/src/task_processor/migrations/sql/0008_get_recurring_tasks_to_process.sql new file mode 100644 index 0000000..acb9e4d --- /dev/null +++ b/src/task_processor/migrations/sql/0008_get_recurring_tasks_to_process.sql @@ -0,0 +1,30 @@ +CREATE OR REPLACE FUNCTION get_recurringtasks_to_process(num_tasks integer) +RETURNS SETOF task_processor_recurringtask AS $$ +DECLARE + row_to_return task_processor_recurringtask; +BEGIN + -- Select the tasks that needs to be 
processed + FOR row_to_return IN + SELECT * + FROM task_processor_recurringtask + WHERE is_locked = FALSE + ORDER BY id + LIMIT num_tasks + -- Select for update to ensure that no other workers can select these tasks while in this transaction block + FOR UPDATE SKIP LOCKED + LOOP + -- Lock every selected task(by updating `is_locked` to true) + UPDATE task_processor_recurringtask + -- Lock this row by setting is_locked True, so that no other workers can select these tasks after this + -- transaction is complete (but the tasks are still being executed by the current worker) + SET is_locked = TRUE + WHERE id = row_to_return.id; + -- If we don't explicitly update the `is_locked` column here, the client will receive the row that is actually locked but has the `is_locked` value set to `False`. + row_to_return.is_locked := TRUE; + RETURN NEXT row_to_return; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql + diff --git a/src/task_processor/migrations/sql/0008_get_tasks_to_process.sql b/src/task_processor/migrations/sql/0008_get_tasks_to_process.sql new file mode 100644 index 0000000..ba1072d --- /dev/null +++ b/src/task_processor/migrations/sql/0008_get_tasks_to_process.sql @@ -0,0 +1,30 @@ +CREATE OR REPLACE FUNCTION get_tasks_to_process(num_tasks integer) +RETURNS SETOF task_processor_task AS $$ +DECLARE + row_to_return task_processor_task; +BEGIN + -- Select the tasks that needs to be processed + FOR row_to_return IN + SELECT * + FROM task_processor_task + WHERE num_failures < 3 AND scheduled_for < NOW() AND completed = FALSE AND is_locked = FALSE + ORDER BY scheduled_for ASC, created_at ASC + LIMIT num_tasks + -- Select for update to ensure that no other workers can select these tasks while in this transaction block + FOR UPDATE SKIP LOCKED + LOOP + -- Lock every selected task(by updating `is_locked` to true) + UPDATE task_processor_task + -- Lock this row by setting is_locked True, so that no other workers can select these tasks after this + -- transaction is complete (but the tasks are still being executed by the current worker) + SET is_locked = TRUE + WHERE id = row_to_return.id; + -- If we don't explicitly update the `is_locked` column here, the client will receive the row that is actually locked but has the `is_locked` value set to `False`. 
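+ -- Illustrative example (not part of the original function body): if two workers call
+ -- get_tasks_to_process(5) at the same moment, FOR UPDATE SKIP LOCKED above means the second
+ -- caller skips the rows already claimed by the first, so each worker receives up to five
+ -- distinct tasks rather than both receiving the same batch.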
+ row_to_return.is_locked := TRUE; + RETURN NEXT row_to_return; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql + diff --git a/src/task_processor/migrations/sql/0011_get_tasks_to_process.sql b/src/task_processor/migrations/sql/0011_get_tasks_to_process.sql new file mode 100644 index 0000000..2dc6d60 --- /dev/null +++ b/src/task_processor/migrations/sql/0011_get_tasks_to_process.sql @@ -0,0 +1,30 @@ +CREATE OR REPLACE FUNCTION get_tasks_to_process(num_tasks integer) +RETURNS SETOF task_processor_task AS $$ +DECLARE + row_to_return task_processor_task; +BEGIN + -- Select the tasks that needs to be processed + FOR row_to_return IN + SELECT * + FROM task_processor_task + WHERE num_failures < 3 AND scheduled_for < NOW() AND completed = FALSE AND is_locked = FALSE + ORDER BY priority ASC, scheduled_for ASC, created_at ASC + LIMIT num_tasks + -- Select for update to ensure that no other workers can select these tasks while in this transaction block + FOR UPDATE SKIP LOCKED + LOOP + -- Lock every selected task(by updating `is_locked` to true) + UPDATE task_processor_task + -- Lock this row by setting is_locked True, so that no other workers can select these tasks after this + -- transaction is complete (but the tasks are still being executed by the current worker) + SET is_locked = TRUE + WHERE id = row_to_return.id; + -- If we don't explicitly update the `is_locked` column here, the client will receive the row that is actually locked but has the `is_locked` value set to `False`. + row_to_return.is_locked := TRUE; + RETURN NEXT row_to_return; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql + diff --git a/src/task_processor/migrations/sql/0012_get_recurringtasks_to_process.sql b/src/task_processor/migrations/sql/0012_get_recurringtasks_to_process.sql new file mode 100644 index 0000000..52bec14 --- /dev/null +++ b/src/task_processor/migrations/sql/0012_get_recurringtasks_to_process.sql @@ -0,0 +1,33 @@ +CREATE OR REPLACE FUNCTION get_recurringtasks_to_process() +RETURNS SETOF task_processor_recurringtask AS $$ +DECLARE + row_to_return task_processor_recurringtask; +BEGIN + -- Select the tasks that needs to be processed + FOR row_to_return IN + SELECT * + FROM task_processor_recurringtask + -- Add one minute to the timeout as a grace period for overhead + WHERE is_locked = FALSE OR (locked_at IS NOT NULL AND locked_at < NOW() - timeout + INTERVAL '1 minute') + ORDER BY id + LIMIT 1 + -- Select for update to ensure that no other workers can select these tasks while in this transaction block + FOR UPDATE SKIP LOCKED + LOOP + -- Lock every selected task(by updating `is_locked` to true) + UPDATE task_processor_recurringtask + -- Lock this row by setting is_locked True, so that no other workers can select these tasks after this + -- transaction is complete (but the tasks are still being executed by the current worker) + SET is_locked = TRUE, locked_at = NOW() + WHERE id = row_to_return.id; + -- If we don't explicitly update the columns here, the client will receive a row + -- that is locked but still shows `is_locked` as `False` and `locked_at` as `None`. 
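+ -- Illustrative example (not part of the original function body): combined with LIMIT 1 above,
+ -- each call claims at most one recurring task, so a pool of workers drains the due tasks one
+ -- at a time without two workers ever being handed the same row.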
+ row_to_return.is_locked := TRUE; + row_to_return.locked_at := NOW(); + RETURN NEXT row_to_return; + END LOOP; + + RETURN; +END; +$$ LANGUAGE plpgsql + diff --git a/src/task_processor/migrations/sql/__init__.py b/src/task_processor/migrations/sql/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/task_processor/models.py b/src/task_processor/models.py new file mode 100644 index 0000000..3b0e080 --- /dev/null +++ b/src/task_processor/models.py @@ -0,0 +1,245 @@ +import typing +import uuid +from datetime import datetime, timedelta + +import simplejson as json +from django.core.serializers.json import DjangoJSONEncoder +from django.db import models +from django.utils import timezone + +from task_processor.exceptions import TaskProcessingError, TaskQueueFullError +from task_processor.managers import RecurringTaskManager, TaskManager +from task_processor.task_registry import registered_tasks +from task_processor.types import TaskCallable + +_django_json_encoder_default = DjangoJSONEncoder().default + + +class TaskPriority(models.IntegerChoices): + LOWER = 100 + LOW = 75 + NORMAL = 50 + HIGH = 25 + HIGHEST = 0 + + +class AbstractBaseTask(models.Model): + uuid = models.UUIDField(unique=True, default=uuid.uuid4) + created_at = models.DateTimeField(auto_now_add=True) + task_identifier = models.CharField(max_length=200) + serialized_args = models.TextField(blank=True, null=True) + serialized_kwargs = models.TextField(blank=True, null=True) + is_locked = models.BooleanField(default=False) + timeout = models.DurationField(blank=True, null=True) + + class Meta: + abstract = True + + @property + def args(self) -> typing.List[typing.Any]: + if self.serialized_args: + args = self.deserialize_data(self.serialized_args) + if typing.TYPE_CHECKING: + assert isinstance(args, list) + return args + return [] + + @property + def kwargs(self) -> typing.Dict[str, typing.Any]: + if self.serialized_kwargs: + kwargs = self.deserialize_data(self.serialized_kwargs) + if typing.TYPE_CHECKING: + assert isinstance(kwargs, dict) + return kwargs + return {} + + @staticmethod + def serialize_data(data: typing.Any) -> str: + return json.dumps(data, default=_django_json_encoder_default) + + @staticmethod + def deserialize_data(data: str) -> typing.Any: + return json.loads(data) + + def mark_failure(self) -> None: + self.unlock() + + def mark_success(self) -> None: + self.unlock() + + def unlock(self) -> None: + self.is_locked = False + + def run(self) -> None: + return self.callable(*self.args, **self.kwargs) + + @property + def callable(self) -> TaskCallable[typing.Any]: + try: + task = registered_tasks[self.task_identifier] + return task.task_function + except KeyError as e: + raise TaskProcessingError( + "No task registered with identifier '%s'. 
Ensure your task is " + "decorated with @register_task_handler." % self.task_identifier + ) from e + + +class Task(AbstractBaseTask): + scheduled_for = models.DateTimeField(blank=True, null=True, default=timezone.now) + + timeout = models.DurationField(blank=True, null=True) + + # denormalise failures and completion so that we can use select_for_update + num_failures = models.IntegerField(default=0) + completed = models.BooleanField(default=False) + objects: TaskManager = TaskManager() + priority = models.SmallIntegerField( + default=None, null=True, choices=TaskPriority.choices + ) + + class Meta: + # We have customised the migration in 0004 to only apply this change to postgres databases + # TODO: work out how to index the taskprocessor_task table for Oracle and MySQL + indexes = [ + models.Index( + name="incomplete_tasks_idx", + fields=["scheduled_for"], + condition=models.Q(completed=False, num_failures__lt=3), + ) + ] + + @classmethod + def create( + cls, + task_identifier: str, + scheduled_for: datetime, + priority: TaskPriority = TaskPriority.NORMAL, + queue_size: int | None = None, + *, + args: typing.Tuple[typing.Any, ...] | None = None, + kwargs: typing.Dict[str, typing.Any] | None = None, + timeout: timedelta | None = timedelta(seconds=60), + ) -> "Task": + if queue_size and cls._is_queue_full(task_identifier, queue_size): + raise TaskQueueFullError( + f"Queue for task {task_identifier} is full. " + f"Max queue size is {queue_size}" + ) + return Task( + task_identifier=task_identifier, + scheduled_for=scheduled_for, + priority=priority, + serialized_args=cls.serialize_data(args or tuple()), + serialized_kwargs=cls.serialize_data(kwargs or dict()), + timeout=timeout, + ) + + @classmethod + def _is_queue_full(cls, task_identifier: str, queue_size: int) -> bool: + return ( + cls.objects.filter( + task_identifier=task_identifier, + completed=False, + num_failures__lt=3, + ).count() + > queue_size + ) + + def mark_failure(self) -> None: + super().mark_failure() + self.num_failures += 1 + + def mark_success(self) -> None: + super().mark_success() + self.completed = True + + +class RecurringTask(AbstractBaseTask): + run_every = models.DurationField() + first_run_time = models.TimeField(blank=True, null=True) + + locked_at = models.DateTimeField(blank=True, null=True) + timeout = models.DurationField(default=timedelta(minutes=30)) + + objects: RecurringTaskManager = RecurringTaskManager() + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["task_identifier", "run_every"], + name="unique_run_every_tasks", + ), + ] + + def unlock(self) -> None: + self.is_locked = False + self.locked_at = None + + @property + def should_execute(self) -> bool: + now = timezone.now() + last_task_run = ( + self.task_runs.order_by("-started_at").first() if self.pk else None + ) + + if not last_task_run: + # If we have never run this task, then we should execute it only if + # the time has passed after which we want to ensure this task runs. + # This allows us to control when intensive tasks should be run.
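+ # Illustrative example (not part of the original method): with first_run_time=09:00,
+ # a task that has never run returns False from this property at 08:30 and True from
+ # 09:00 onwards (times are compared against timezone.now().time()).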
+ return not (self.first_run_time and self.first_run_time > now.time()) + + # if the last run was at t- run_every, then we should execute it + if (timezone.now() - last_task_run.started_at) >= self.run_every: + return True + + # if the last run was not a success and we do not have + # more than 3 failures in t- run_every, then we should execute it + if ( + last_task_run.result != TaskResult.SUCCESS.name + and self.task_runs.filter(started_at__gte=(now - self.run_every)).count() + <= 3 + ): + return True + # otherwise, we should not execute it + return False + + @property + def is_task_registered(self) -> bool: + return self.task_identifier in registered_tasks + + +class TaskResult(models.Choices): + SUCCESS = "SUCCESS" + FAILURE = "FAILURE" + + +class AbstractTaskRun(models.Model): + started_at = models.DateTimeField() + finished_at = models.DateTimeField(blank=True, null=True) + result = models.CharField( + max_length=50, choices=TaskResult.choices, blank=True, null=True, db_index=True + ) + error_details = models.TextField(blank=True, null=True) + task = models.ForeignKey( + AbstractBaseTask, on_delete=models.CASCADE, related_name="task_runs" + ) + + class Meta: + abstract = True + + +class TaskRun(AbstractTaskRun): + task = models.ForeignKey(Task, on_delete=models.CASCADE, related_name="task_runs") + + +class RecurringTaskRun(AbstractTaskRun): + task = models.ForeignKey( + RecurringTask, on_delete=models.CASCADE, related_name="task_runs" + ) + + +class HealthCheckModel(models.Model): + created_at = models.DateTimeField(auto_now_add=True) + uuid = models.UUIDField(unique=True, blank=False, null=False) diff --git a/src/task_processor/monitoring.py b/src/task_processor/monitoring.py new file mode 100644 index 0000000..848b354 --- /dev/null +++ b/src/task_processor/monitoring.py @@ -0,0 +1,12 @@ +from django.utils import timezone + +from task_processor.models import Task + + +def get_num_waiting_tasks() -> int: + return Task.objects.filter( + num_failures__lt=3, + completed=False, + scheduled_for__lt=timezone.now(), + is_locked=False, + ).count() diff --git a/src/task_processor/processor.py b/src/task_processor/processor.py new file mode 100644 index 0000000..78aa820 --- /dev/null +++ b/src/task_processor/processor.py @@ -0,0 +1,138 @@ +import logging +import traceback +import typing +from concurrent.futures import ThreadPoolExecutor +from datetime import timedelta + +from django.utils import timezone + +from task_processor.models import ( + AbstractBaseTask, + RecurringTask, + RecurringTaskRun, + Task, + TaskResult, + TaskRun, +) + +T = typing.TypeVar("T", bound=AbstractBaseTask) +AnyTaskRun = TaskRun | RecurringTaskRun + +logger = logging.getLogger(__name__) + +UNREGISTERED_RECURRING_TASK_GRACE_PERIOD = timedelta(minutes=30) + + +def run_tasks(num_tasks: int = 1) -> list[TaskRun]: + if num_tasks < 1: + raise ValueError("Number of tasks to process must be at least one") + + tasks = Task.objects.get_tasks_to_process(num_tasks) + + if tasks: + logger.debug(f"Running {len(tasks)} task(s)") + + executed_tasks = [] + task_runs = [] + + for task in tasks: + task, task_run = _run_task(task) + + executed_tasks.append(task) + assert isinstance(task_run, TaskRun) + task_runs.append(task_run) + + if executed_tasks: + Task.objects.bulk_update( + executed_tasks, + fields=["completed", "num_failures", "is_locked"], + ) + + if task_runs: + TaskRun.objects.bulk_create(task_runs) + logger.debug(f"Finished running {len(task_runs)} task(s)") + + return task_runs + + return [] + + +def run_recurring_tasks() 
-> list[RecurringTaskRun]: + # NOTE: We will probably see a lot of delay in the execution of recurring tasks + # if the tasks take longer then `run_every` to execute. This is not + # a problem for now, but we should be mindful of this limitation + tasks = RecurringTask.objects.get_tasks_to_process() + if tasks: + logger.debug(f"Running {len(tasks)} recurring task(s)") + + task_runs = [] + + for task in tasks: + if not task.is_task_registered: + # This is necessary to ensure that old instances of the task processor, + # which may still be running during deployment, do not remove tasks added by new instances. + # Reference: https://github.com/Flagsmith/flagsmith/issues/2551 + if ( + timezone.now() - task.created_at + ) > UNREGISTERED_RECURRING_TASK_GRACE_PERIOD: + task.delete() + continue + + if task.should_execute: + task, task_run = _run_task(task) + assert isinstance(task_run, RecurringTaskRun) + task_runs.append(task_run) + else: + task.unlock() + + # update all tasks that were not deleted + to_update = [task for task in tasks if task.id] + RecurringTask.objects.bulk_update(to_update, fields=["is_locked", "locked_at"]) + + if task_runs: + RecurringTaskRun.objects.bulk_create(task_runs) + logger.debug(f"Finished running {len(task_runs)} recurring task(s)") + + return task_runs + + return [] + + +def _run_task( + task: T, +) -> typing.Tuple[T, AnyTaskRun]: + logger.debug( + f"Running task {task.task_identifier} id={task.pk} args={task.args} kwargs={task.kwargs}" + ) + task_run: AnyTaskRun = task.task_runs.model(started_at=timezone.now(), task=task) # type: ignore[attr-defined] + + try: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(task.run) + timeout = task.timeout.total_seconds() if task.timeout else None + future.result(timeout=timeout) # Wait for completion or timeout + + task_run.result = TaskResult.SUCCESS.value + task_run.finished_at = timezone.now() + task.mark_success() + logger.debug(f"Task {task.task_identifier} id={task.pk} completed") + + except Exception as e: + # For errors that don't include a default message (e.g., TimeoutError), + # fall back to using repr. + err_msg = str(e) or repr(e) + + logger.error( + "Failed to execute task '%s', with id %d. 
Exception: %s", + task.task_identifier, + task.pk, + err_msg, + exc_info=True, + ) + + task.mark_failure() + + task_run.result = TaskResult.FAILURE.value + task_run.error_details = str(traceback.format_exc()) + + return task, task_run diff --git a/src/task_processor/py.typed b/src/task_processor/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/task_processor/serializers.py b/src/task_processor/serializers.py new file mode 100644 index 0000000..1302504 --- /dev/null +++ b/src/task_processor/serializers.py @@ -0,0 +1,7 @@ +from rest_framework import serializers + +from task_processor.types import MonitoringInfo + + +class MonitoringSerializer(serializers.Serializer[MonitoringInfo]): + waiting = serializers.IntegerField(read_only=True) diff --git a/src/task_processor/task_registry.py b/src/task_processor/task_registry.py new file mode 100644 index 0000000..c64a089 --- /dev/null +++ b/src/task_processor/task_registry.py @@ -0,0 +1,82 @@ +import enum +import logging +import typing +from dataclasses import dataclass + +from task_processor.types import TaskCallable + +logger = logging.getLogger(__name__) + + +class TaskType(enum.Enum): + STANDARD = "STANDARD" + RECURRING = "RECURRING" + + +@dataclass +class RegisteredTask: + task_identifier: str + task_function: TaskCallable[typing.Any] + task_type: TaskType = TaskType.STANDARD + task_kwargs: dict[str, typing.Any] | None = None + + +registered_tasks: dict[str, RegisteredTask] = {} + + +def initialise() -> None: + global registered_tasks + + from task_processor.models import RecurringTask + + for task_identifier, registered_task in registered_tasks.items(): + logger.debug("Initialising task '%s'", task_identifier) + + if registered_task.task_type == TaskType.RECURRING: + logger.debug("Persisting recurring task '%s'", task_identifier) + RecurringTask.objects.update_or_create( + task_identifier=task_identifier, + defaults=registered_task.task_kwargs, + ) + + +def get_task(task_identifier: str) -> RegisteredTask: + global registered_tasks + + return registered_tasks[task_identifier] + + +def register_task( + task_identifier: str, + callable_: TaskCallable[typing.Any], +) -> None: + global registered_tasks + + registered_task = RegisteredTask( + task_identifier=task_identifier, + task_function=callable_, + ) + registered_tasks[task_identifier] = registered_task + + +def register_recurring_task( + task_identifier: str, + callable_: TaskCallable[typing.Any], + **task_kwargs: typing.Any, +) -> None: + global registered_tasks + + logger.debug("Registering recurring task '%s'", task_identifier) + + registered_task = RegisteredTask( + task_identifier=task_identifier, + task_function=callable_, + task_type=TaskType.RECURRING, + task_kwargs=task_kwargs, + ) + registered_tasks[task_identifier] = registered_task + + logger.debug( + "Registered tasks now has the following tasks registered: %s", + list(registered_tasks.keys()), + ) diff --git a/src/task_processor/task_run_method.py b/src/task_processor/task_run_method.py new file mode 100644 index 0000000..e7d471e --- /dev/null +++ b/src/task_processor/task_run_method.py @@ -0,0 +1,7 @@ +from enum import Enum + + +class TaskRunMethod(Enum): + SYNCHRONOUSLY = "SYNCHRONOUSLY" + SEPARATE_THREAD = "SEPARATE_THREAD" + TASK_PROCESSOR = "TASK_PROCESSOR" diff --git a/src/task_processor/tasks.py b/src/task_processor/tasks.py new file mode 100644 index 0000000..787f596 --- /dev/null +++ b/src/task_processor/tasks.py @@ -0,0 +1,71 @@ +import logging +import typing +from datetime import timedelta + +from 
django.conf import settings +from django.db.models import Q +from django.utils import timezone + +from task_processor.decorators import ( + register_recurring_task, + register_task_handler, +) +from task_processor.models import HealthCheckModel, RecurringTaskRun, Task + +if typing.TYPE_CHECKING: + # ugh https://github.com/typeddjango/django-stubs/issues/1744 + # TODO maybe switch to https://github.com/getsentry/sentry-forked-django-stubs + HealthCheckModel.objects = HealthCheckModel._default_manager + RecurringTaskRun.objects = RecurringTaskRun._default_manager + + +logger = logging.getLogger(__name__) + + +@register_task_handler() +def create_health_check_model(health_check_model_uuid: str) -> None: + logger.info("Creating health check model.") + HealthCheckModel.objects.create(uuid=health_check_model_uuid) + + +@register_recurring_task( + run_every=settings.TASK_DELETE_RUN_EVERY, + first_run_time=settings.TASK_DELETE_RUN_TIME, +) +def clean_up_old_tasks() -> None: + if not settings.ENABLE_CLEAN_UP_OLD_TASKS: + return + + now = timezone.now() + delete_before = now - timedelta(days=settings.TASK_DELETE_RETENTION_DAYS) + + # build the query + query = Q(completed=True) + if settings.TASK_DELETE_INCLUDE_FAILED_TASKS: + query = query | Q(num_failures__gte=3) + query = Q(scheduled_for__lt=delete_before) & query + + # TODO: validate if deleting in batches is more / less impactful on the DB + while True: + # delete in batches of settings.TASK_DELETE_BATCH_SIZE + num_tasks_deleted, _ = Task.objects.filter( + pk__in=Task.objects.filter(query).values_list("id", flat=True)[ + 0 : settings.TASK_DELETE_BATCH_SIZE # noqa:E203 + ] + ).delete() + if num_tasks_deleted == 0: + break + + +@register_recurring_task( + run_every=settings.TASK_DELETE_RUN_EVERY, + first_run_time=settings.TASK_DELETE_RUN_TIME, +) +def clean_up_old_recurring_task_runs() -> None: + if not settings.ENABLE_CLEAN_UP_OLD_TASKS: + return + + now = timezone.now() + delete_before = now - timedelta(days=settings.RECURRING_TASK_RUN_RETENTION_DAYS) + + RecurringTaskRun.objects.filter(finished_at__lt=delete_before).delete() diff --git a/src/task_processor/threads.py b/src/task_processor/threads.py new file mode 100644 index 0000000..4529cd2 --- /dev/null +++ b/src/task_processor/threads.py @@ -0,0 +1,112 @@ +import logging +import time +import typing +from datetime import datetime, timedelta +from threading import Thread + +from django.db import close_old_connections +from django.utils import timezone + +from task_processor.processor import run_recurring_tasks, run_tasks +from task_processor.task_registry import initialise +from task_processor.types import TaskProcessorConfig + +logger = logging.getLogger(__name__) + + +class TaskRunnerCoordinator(Thread): + def __init__( + self, + *args: typing.Any, + config: TaskProcessorConfig, + **kwargs: typing.Any, + ) -> None: + super().__init__(*args, **kwargs) + self.config = config + self._threads: list[TaskRunner] = [] + self._monitor_threads = True + + def run(self) -> None: + initialise() + + logger.info("Processor starting") + + for _ in range(self.config.num_threads): + self._threads.append( + task := TaskRunner( + sleep_interval_millis=self.config.sleep_interval_ms, + queue_pop_size=self.config.queue_pop_size, + ) + ) + task.start() + + ms_before_unhealthy = ( + self.config.grace_period_ms + self.config.sleep_interval_ms + ) + while self._monitor_threads: + time.sleep(1) + unhealthy_threads = self._get_unhealthy_threads( + ms_before_unhealthy=ms_before_unhealthy + ) + if unhealthy_threads: + 
logger.warning("%d unhealthy threads detected", len(unhealthy_threads)) + + for thread in self._threads: + thread.join() + + def _get_unhealthy_threads(self, ms_before_unhealthy: int) -> list["TaskRunner"]: + unhealthy_threads = [] + healthy_threshold = timezone.now() - timedelta(milliseconds=ms_before_unhealthy) + + for thread in self._threads: + if ( + not thread.is_alive() + or not thread.last_checked_for_tasks + or thread.last_checked_for_tasks < healthy_threshold + ): + unhealthy_threads.append(thread) + return unhealthy_threads + + def stop(self) -> None: + self._monitor_threads = False + for t in self._threads: + t.stop() + + +class TaskRunner(Thread): + def __init__( + self, + *args: typing.Any, + sleep_interval_millis: int = 2000, + queue_pop_size: int = 1, + **kwargs: typing.Any, + ): + super(TaskRunner, self).__init__(*args, **kwargs) + self.sleep_interval_millis = sleep_interval_millis + self.queue_pop_size = queue_pop_size + self.last_checked_for_tasks: datetime | None = None + + self._stopped = False + + def run(self) -> None: + while not self._stopped: + self.last_checked_for_tasks = timezone.now() + self.run_iteration() + time.sleep(self.sleep_interval_millis / 1000) + + def run_iteration(self) -> None: + try: + run_tasks(self.queue_pop_size) + run_recurring_tasks() + except Exception as e: + # To prevent task threads from dying if they get an error retrieving the tasks from the + # database this will allow the thread to continue trying to retrieve tasks if it can + # successfully re-establish a connection to the database. + # TODO: is this also what is causing tasks to get stuck as locked? Can we unlock + # tasks here? + + logger.error("Received error retrieving tasks: %s.", e, exc_info=e) + close_old_connections() + + def stop(self) -> None: + self._stopped = True diff --git a/src/task_processor/types.py b/src/task_processor/types.py new file mode 100644 index 0000000..3b32a81 --- /dev/null +++ b/src/task_processor/types.py @@ -0,0 +1,18 @@ +from dataclasses import dataclass +from typing import Callable, ParamSpec, TypeAlias, TypedDict + +TaskParameters = ParamSpec("TaskParameters") + +TaskCallable: TypeAlias = Callable[TaskParameters, None] + + +@dataclass +class TaskProcessorConfig: + num_threads: int + sleep_interval_ms: int + grace_period_ms: int + queue_pop_size: int + + +class MonitoringInfo(TypedDict): + waiting: int diff --git a/src/task_processor/urls.py b/src/task_processor/urls.py new file mode 100644 index 0000000..508e10f --- /dev/null +++ b/src/task_processor/urls.py @@ -0,0 +1,5 @@ +from django.urls import path + +from task_processor.views import monitoring + +urlpatterns = [path("monitoring/", monitoring)] diff --git a/src/task_processor/utils.py b/src/task_processor/utils.py new file mode 100644 index 0000000..f526ff5 --- /dev/null +++ b/src/task_processor/utils.py @@ -0,0 +1,71 @@ +import argparse +import inspect +import logging +from contextlib import contextmanager +from typing import Any, Generator + +from task_processor.threads import TaskRunnerCoordinator +from task_processor.types import TaskCallable, TaskProcessorConfig + +logger = logging.getLogger(__name__) + + +def get_task_identifier_from_function( + function: TaskCallable[Any], + task_name: str | None, +) -> str: + module = inspect.getmodule(function) + assert module + return f"{module.__name__.rsplit('.')[-1]}.{task_name or function.__name__}" + + +def add_arguments(parser: argparse.ArgumentParser) -> None: + parser.add_argument( + "--numthreads", + type=int, + help="Number of worker threads 
to run.", + default=5, + ) + parser.add_argument( + "--sleepintervalms", + type=int, + help="Number of millis each worker waits before checking for new tasks", + default=2000, + ) + parser.add_argument( + "--graceperiodms", + type=int, + help="Number of millis before running task is considered 'stuck'.", + default=20000, + ) + parser.add_argument( + "--queuepopsize", + type=int, + help="Number of tasks each worker will pop from the queue on each cycle.", + default=10, + ) + + +@contextmanager +def start_task_processor( + options: dict[str, Any], +) -> Generator[ + TaskRunnerCoordinator, + None, + None, +]: + config = TaskProcessorConfig( + num_threads=options["numthreads"], + sleep_interval_ms=options["sleepintervalms"], + grace_period_ms=options["graceperiodms"], + queue_pop_size=options["queuepopsize"], + ) + + logger.debug("Config: %s", config) + + coordinator = TaskRunnerCoordinator(config=config) + coordinator.start() + try: + yield coordinator + finally: + coordinator.stop() diff --git a/src/task_processor/views.py b/src/task_processor/views.py new file mode 100644 index 0000000..bd207a8 --- /dev/null +++ b/src/task_processor/views.py @@ -0,0 +1,20 @@ +from typing import Any + +from drf_yasg.utils import swagger_auto_schema # type: ignore[import-untyped] +from rest_framework.decorators import api_view, permission_classes +from rest_framework.permissions import IsAdminUser, IsAuthenticated +from rest_framework.request import Request +from rest_framework.response import Response + +from task_processor.monitoring import get_num_waiting_tasks +from task_processor.serializers import MonitoringSerializer + + +@swagger_auto_schema(method="GET", responses={200: MonitoringSerializer()}) # type: ignore[misc] +@api_view(http_method_names=["GET"]) +@permission_classes([IsAuthenticated, IsAdminUser]) +def monitoring(request: Request, /, **kwargs: Any) -> Response: + return Response( + data={"waiting": get_num_waiting_tasks()}, + content_type="application/json", + ) diff --git a/tests/unit/task_processor/__init__.py b/tests/unit/task_processor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit/task_processor/conftest.py b/tests/unit/task_processor/conftest.py new file mode 100644 index 0000000..23769b2 --- /dev/null +++ b/tests/unit/task_processor/conftest.py @@ -0,0 +1,45 @@ +import logging +import typing + +import pytest + +from task_processor.task_registry import RegisteredTask + + +@pytest.fixture +def run_by_processor(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("RUN_BY_PROCESSOR", "True") + + +class GetTaskProcessorCaplog(typing.Protocol): + def __call__( + self, log_level: str | int = logging.INFO + ) -> pytest.LogCaptureFixture: ... + + +@pytest.fixture +def get_task_processor_caplog( + caplog: pytest.LogCaptureFixture, +) -> GetTaskProcessorCaplog: + # caplog doesn't allow you to capture logging outputs from loggers that don't + # propagate to root. Quick hack here to get the task_processor logger to + # propagate. + # TODO: look into using loguru. + + def _inner(log_level: str | int = logging.INFO) -> pytest.LogCaptureFixture: + task_processor_logger = logging.getLogger("task_processor") + task_processor_logger.propagate = True + # Assume required level for the logger. 
+ task_processor_logger.setLevel(log_level) + caplog.set_level(log_level) + return caplog + + return _inner + + +@pytest.fixture(autouse=True) +def task_registry() -> typing.Generator[dict[str, RegisteredTask], None, None]: + from task_processor.task_registry import registered_tasks + + registered_tasks.clear() + yield registered_tasks diff --git a/tests/unit/task_processor/test_unit_task_processor_decorators.py b/tests/unit/task_processor/test_unit_task_processor_decorators.py new file mode 100644 index 0000000..3609515 --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_decorators.py @@ -0,0 +1,229 @@ +import json +import typing +from datetime import timedelta +from unittest.mock import MagicMock + +import pytest +from pytest_django import DjangoCaptureOnCommitCallbacks +from pytest_django.fixtures import SettingsWrapper +from pytest_mock import MockerFixture + +from task_processor.decorators import ( + register_recurring_task, + register_task_handler, +) +from task_processor.exceptions import InvalidArgumentsError +from task_processor.models import RecurringTask, Task, TaskPriority +from task_processor.task_registry import get_task, initialise +from task_processor.task_run_method import TaskRunMethod + +if typing.TYPE_CHECKING: + # This import breaks private-package-test workflow in core + from tests.unit.task_processor.conftest import GetTaskProcessorCaplog + + +@pytest.fixture +def mock_thread_class( + mocker: MockerFixture, +) -> MagicMock: + mock_thread_class = mocker.patch( + "task_processor.decorators.Thread", + return_value=mocker.MagicMock(), + ) + return mock_thread_class + + +@pytest.mark.django_db +def test_register_task_handler_run_in_thread__transaction_commit__true__default( + get_task_processor_caplog: "GetTaskProcessorCaplog", + mock_thread_class: MagicMock, + django_capture_on_commit_callbacks: DjangoCaptureOnCommitCallbacks, +) -> None: + # Given + caplog = get_task_processor_caplog() + + @register_task_handler() + def my_function(*args: str, **kwargs: str) -> None: + pass + + mock_thread = mock_thread_class.return_value + + args = ("foo",) + kwargs = {"bar": "baz"} + + # When + with django_capture_on_commit_callbacks(execute=True): + my_function.run_in_thread(args=args, kwargs=kwargs) + + # Then + mock_thread_class.assert_called_once_with( + target=my_function.unwrapped, args=args, kwargs=kwargs, daemon=True + ) + mock_thread.start.assert_called_once() + + assert len(caplog.records) == 1 + assert ( + caplog.records[0].message == "Running function my_function in unmanaged thread." + ) + + +def test_register_task_handler_run_in_thread__transaction_commit__false( + get_task_processor_caplog: "GetTaskProcessorCaplog", + mock_thread_class: MagicMock, +) -> None: + # Given + caplog = get_task_processor_caplog() + + @register_task_handler(transaction_on_commit=False) + def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: + pass + + mock_thread = mock_thread_class.return_value + + args = ("foo",) + kwargs = {"bar": "baz"} + + # When + my_function.run_in_thread(args=args, kwargs=kwargs) + + # Then + mock_thread_class.assert_called_once_with( + target=my_function.unwrapped, args=args, kwargs=kwargs, daemon=True + ) + mock_thread.start.assert_called_once() + + assert len(caplog.records) == 1 + assert ( + caplog.records[0].message == "Running function my_function in unmanaged thread." 
+ ) + + +def test_register_recurring_task( + mocker: MockerFixture, + db: None, + run_by_processor: None, +) -> None: + # Given + mock = mocker.Mock() + mock.__name__ = "a_function" + task_kwargs = {"first_arg": "foo", "second_arg": "bar"} + run_every = timedelta(minutes=10) + task_identifier = "mock.a_function" + + # When + register_recurring_task( + run_every=run_every, + kwargs=task_kwargs, + )(mock) + + # Then + initialise() + + task = RecurringTask.objects.get(task_identifier=task_identifier) + assert task.serialized_kwargs == json.dumps(task_kwargs) + assert task.run_every == run_every + + assert get_task(task_identifier) + assert task.callable is mock + + +def test_register_recurring_task_does_nothing_if_not_run_by_processor( + mocker: MockerFixture, + db: None, +) -> None: + # Given + + task_kwargs = {"first_arg": "foo", "second_arg": "bar"} + run_every = timedelta(minutes=10) + task_identifier = "test_unit_task_processor_decorators.some_function" + + # When + @register_recurring_task( + run_every=run_every, + kwargs=task_kwargs, + ) + def some_function(first_arg: str, second_arg: str) -> None: + pass + + # Then + assert not RecurringTask.objects.filter(task_identifier=task_identifier).exists() + with pytest.raises(KeyError): + assert get_task(task_identifier) + + +def test_register_task_handler_validates_inputs() -> None: + # Given + @register_task_handler() + def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: + pass + + class NonSerializableObj: + pass + + # When + with pytest.raises(InvalidArgumentsError): + my_function(NonSerializableObj()) + + +@pytest.mark.parametrize( + "task_run_method", (TaskRunMethod.SEPARATE_THREAD, TaskRunMethod.SYNCHRONOUSLY) +) +def test_inputs_are_validated_when_run_without_task_processor( + settings: SettingsWrapper, task_run_method: TaskRunMethod +) -> None: + # Given + settings.TASK_RUN_METHOD = task_run_method + + @register_task_handler() + def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: + pass + + class NonSerializableObj: + pass + + # When + with pytest.raises(InvalidArgumentsError): + my_function.delay(args=(NonSerializableObj(),)) + + +def test_delay_returns_none_if_task_queue_is_full( + settings: SettingsWrapper, db: None +) -> None: + # Given + settings.TASK_RUN_METHOD = TaskRunMethod.TASK_PROCESSOR + + @register_task_handler(queue_size=1) + def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: + pass + + for _ in range(10): + Task.objects.create( + task_identifier="test_unit_task_processor_decorators.my_function" + ) + + # When + task = my_function.delay() + + # Then + assert task is None + + +def test_can_create_task_with_priority(settings: SettingsWrapper, db: None) -> None: + # Given + settings.TASK_RUN_METHOD = TaskRunMethod.TASK_PROCESSOR + + @register_task_handler(priority=TaskPriority.HIGH) + def my_function(*args: typing.Any, **kwargs: typing.Any) -> None: + pass + + for _ in range(10): + Task.objects.create( + task_identifier="test_unit_task_processor_decorators.my_function" + ) + + # When + task = my_function.delay() + + # Then + assert task + assert task.priority == TaskPriority.HIGH diff --git a/tests/unit/task_processor/test_unit_task_processor_health.py b/tests/unit/task_processor/test_unit_task_processor_health.py new file mode 100644 index 0000000..21a76bf --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_health.py @@ -0,0 +1,43 @@ +from pytest_django.fixtures import SettingsWrapper +from pytest_mock import MockerFixture + +from task_processor.health import 
is_processor_healthy +from task_processor.models import HealthCheckModel +from task_processor.task_run_method import TaskRunMethod + + +def test_is_processor_healthy_returns_false_if_task_not_processed( + mocker: MockerFixture, +) -> None: + # Given + mocker.patch("task_processor.health.create_health_check_model") + mocked_health_check_model_class = mocker.patch( + "task_processor.health.HealthCheckModel" + ) + mocked_health_check_model_class.objects.filter.return_value.first.return_value = ( + None + ) + + # When + result = is_processor_healthy(max_tries=3) + + # Then + assert result is False + + +def test_is_processor_healthy_returns_true_if_task_processed( + db: None, + settings: SettingsWrapper, +) -> None: + # Given + settings.TASK_RUN_METHOD = TaskRunMethod.SYNCHRONOUSLY + + # When + result = is_processor_healthy(max_tries=3) + + # Then + # the health is reported as success + assert result is True + + # but the health check model used to verify the health is deleted + assert not HealthCheckModel.objects.exists() diff --git a/tests/unit/task_processor/test_unit_task_processor_models.py b/tests/unit/task_processor/test_unit_task_processor_models.py new file mode 100644 index 0000000..311b9bc --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_models.py @@ -0,0 +1,71 @@ +from datetime import time, timedelta +from decimal import Decimal + +import pytest +from django.utils import timezone +from pytest_mock import MockerFixture + +from task_processor.decorators import register_task_handler +from task_processor.models import RecurringTask, Task +from task_processor.task_registry import initialise + +now = timezone.now() +one_hour_ago = now - timedelta(hours=1) +one_hour_from_now = now + timedelta(hours=1) + + +def test_task_run(mocker: MockerFixture) -> None: + # Given + mock = mocker.Mock() + mock.__name__ = "test_task" + + task_handler = register_task_handler()(mock) + + args = ("foo",) + kwargs = {"arg_two": "bar"} + + initialise() + + task = Task.create( + task_handler.task_identifier, + scheduled_for=timezone.now(), + args=args, + kwargs=kwargs, + ) + + # When + task.run() + + # Then + mock.assert_called_once_with(*args, **kwargs) + + +@pytest.mark.parametrize( + "input, expected_output", + ( + ({"value": Decimal("10")}, '{"value": 10}'), + ({"value": Decimal("10.12345")}, '{"value": 10.12345}'), + ), +) +def test_serialize_data_handles_decimal_objects( + input: dict[str, Decimal], + expected_output: str, +) -> None: + assert Task.serialize_data(input) == expected_output + + +@pytest.mark.parametrize( + "first_run_time, expected", + ((one_hour_ago.time(), True), (one_hour_from_now.time(), False)), +) +def test_recurring_task_run_should_execute_first_run_at( + first_run_time: time, + expected: bool, +) -> None: + assert ( + RecurringTask( + first_run_time=first_run_time, + run_every=timedelta(days=1), + ).should_execute + == expected + ) diff --git a/tests/unit/task_processor/test_unit_task_processor_monitoring.py b/tests/unit/task_processor/test_unit_task_processor_monitoring.py new file mode 100644 index 0000000..ffe27f7 --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_monitoring.py @@ -0,0 +1,38 @@ +from datetime import timedelta + +from django.utils import timezone + +from task_processor.models import Task +from task_processor.monitoring import get_num_waiting_tasks + + +def test_get_num_waiting_tasks(db: None) -> None: + # Given + now = timezone.now() + + # a task that is waiting + Task.objects.create(task_identifier="tasks.test_task") + + # 
a task that is scheduled for the future + Task.objects.create( + task_identifier="tasks.test_task", scheduled_for=now + timedelta(days=1) + ) + + # and a task that has been completed + Task.objects.create( + task_identifier="tasks.test_task", + scheduled_for=now - timedelta(days=1), + completed=True, + ) + + # and a task that has been locked for processing + Task.objects.create( + task_identifier="tasks.test_task", + is_locked=True, + ) + + # When + num_waiting_tasks = get_num_waiting_tasks() + + # Then + assert num_waiting_tasks == 1 diff --git a/tests/unit/task_processor/test_unit_task_processor_processor.py b/tests/unit/task_processor/test_unit_task_processor_processor.py new file mode 100644 index 0000000..c82b7a2 --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_processor.py @@ -0,0 +1,654 @@ +import logging +import time +import typing +import uuid +from datetime import timedelta +from threading import Thread + +import pytest +from django.core.cache import cache +from django.utils import timezone +from freezegun import freeze_time +from pytest import MonkeyPatch +from pytest_mock import MockerFixture + +from task_processor.decorators import ( + TaskHandler, + register_recurring_task, + register_task_handler, +) +from task_processor.models import ( + RecurringTask, + RecurringTaskRun, + Task, + TaskPriority, + TaskResult, + TaskRun, +) +from task_processor.processor import ( + UNREGISTERED_RECURRING_TASK_GRACE_PERIOD, + run_recurring_tasks, + run_tasks, +) +from task_processor.task_registry import initialise, registered_tasks + +if typing.TYPE_CHECKING: + # This import breaks private-package-test workflow in core + from tests.unit.task_processor.conftest import GetTaskProcessorCaplog + + +DEFAULT_CACHE_KEY = "foo" +DEFAULT_CACHE_VALUE = "bar" + + +@pytest.fixture(autouse=True) +def reset_cache() -> typing.Generator[None, None, None]: + yield + cache.clear() + + +@pytest.fixture +def dummy_task(db: None) -> TaskHandler[[str, str]]: + @register_task_handler() + def _dummy_task( + key: str = DEFAULT_CACHE_KEY, + value: str = DEFAULT_CACHE_VALUE, + ) -> None: + """function used to test that task is being run successfully""" + cache.set(key, value) + + return _dummy_task + + +@pytest.fixture +def raise_exception_task(db: None) -> TaskHandler[[str]]: + @register_task_handler() + def _raise_exception_task(msg: str) -> None: + raise Exception(msg) + + return _raise_exception_task + + +@pytest.fixture +def sleep_task(db: None) -> TaskHandler[[int]]: + @register_task_handler() + def _sleep_task(seconds: int) -> None: + time.sleep(seconds) + + return _sleep_task + + +def test_run_task_runs_task_and_creates_task_run_object_when_success( + dummy_task: TaskHandler[[str, str]], +) -> None: + # Given + task = Task.create( + dummy_task.task_identifier, + scheduled_for=timezone.now(), + ) + task.save() + + # When + task_runs = run_tasks() + + # Then + assert cache.get(DEFAULT_CACHE_KEY) + + assert len(task_runs) == TaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.SUCCESS.value + assert task_run.started_at + assert task_run.finished_at + assert task_run.error_details is None + + task.refresh_from_db() + assert task.completed + + +def test_run_task_kills_task_after_timeout( + sleep_task: TaskHandler[[int]], + get_task_processor_caplog: "GetTaskProcessorCaplog", +) -> None: + # Given + caplog = get_task_processor_caplog(logging.ERROR) + task = Task.create( + sleep_task.task_identifier, + scheduled_for=timezone.now(), + 
args=(1,), + timeout=timedelta(microseconds=1), + ) + task.save() + + # When + task_runs = run_tasks() + + # Then + assert len(task_runs) == TaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.FAILURE.value + assert task_run.started_at + assert task_run.finished_at is None + assert task_run.error_details + assert "TimeoutError" in task_run.error_details + + task.refresh_from_db() + + assert task.completed is False + assert task.num_failures == 1 + assert task.is_locked is False + + assert len(caplog.records) == 1 + assert caplog.records[0].message == ( + f"Failed to execute task '{task.task_identifier}', with id {task.id}. Exception: TimeoutError()" + ) + + +def test_run_recurring_task_kills_task_after_timeout( + db: None, + monkeypatch: MonkeyPatch, + get_task_processor_caplog: "GetTaskProcessorCaplog", +) -> None: + # Given + caplog = get_task_processor_caplog(logging.ERROR) + monkeypatch.setenv("RUN_BY_PROCESSOR", "True") + + @register_recurring_task( + run_every=timedelta(seconds=1), timeout=timedelta(microseconds=1) + ) + def _dummy_recurring_task() -> None: + time.sleep(1) + + initialise() + + task = RecurringTask.objects.get( + task_identifier="test_unit_task_processor_processor._dummy_recurring_task", + ) + # When + task_runs = run_recurring_tasks() + + # Then + assert len(task_runs) == RecurringTaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.FAILURE.value + assert task_run.started_at + assert task_run.finished_at is None + assert task_run.error_details + assert "TimeoutError" in task_run.error_details + + task.refresh_from_db() + + assert task.locked_at is None + assert task.is_locked is False + + assert len(caplog.records) == 1 + assert caplog.records[0].message == ( + f"Failed to execute task '{task.task_identifier}', with id {task.id}. 
Exception: TimeoutError()" + ) + + +def test_run_recurring_tasks_runs_task_and_creates_recurring_task_run_object_when_success( + db: None, + monkeypatch: MonkeyPatch, +) -> None: + # Given + monkeypatch.setenv("RUN_BY_PROCESSOR", "True") + + @register_recurring_task(run_every=timedelta(seconds=1)) + def _dummy_recurring_task() -> None: + cache.set(DEFAULT_CACHE_KEY, DEFAULT_CACHE_VALUE) + + initialise() + + task = RecurringTask.objects.get( + task_identifier="test_unit_task_processor_processor._dummy_recurring_task", + ) + # When + task_runs = run_recurring_tasks() + + # Then + assert cache.get(DEFAULT_CACHE_KEY) + + assert len(task_runs) == RecurringTaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.SUCCESS.value + assert task_run.started_at + assert task_run.finished_at + assert task_run.error_details is None + + +def test_run_recurring_tasks_runs_locked_task_after_tiemout( + db: None, + monkeypatch: MonkeyPatch, +) -> None: + # Given + monkeypatch.setenv("RUN_BY_PROCESSOR", "True") + + @register_recurring_task(run_every=timedelta(hours=1)) + def _dummy_recurring_task() -> None: + cache.set(DEFAULT_CACHE_KEY, DEFAULT_CACHE_VALUE) + + initialise() + + task = RecurringTask.objects.get( + task_identifier="test_unit_task_processor_processor._dummy_recurring_task", + ) + task.is_locked = True + task.locked_at = timezone.now() - timedelta(hours=1) + task.save() + + # When + assert cache.get(DEFAULT_CACHE_KEY) is None + task_runs = run_recurring_tasks() + + # Then + assert cache.get(DEFAULT_CACHE_KEY) == DEFAULT_CACHE_VALUE + + assert len(task_runs) == RecurringTaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.SUCCESS.value + assert task_run.started_at + assert task_run.finished_at + assert task_run.error_details is None + + # And the task is no longer locked + task.refresh_from_db() + assert task.is_locked is False + assert task.locked_at is None + + +@pytest.mark.django_db(transaction=True) +def test_run_recurring_tasks_multiple_runs( + db: None, + run_by_processor: None, +) -> None: + # Given + @register_recurring_task(run_every=timedelta(milliseconds=200)) + def _dummy_recurring_task() -> None: + val = cache.get(DEFAULT_CACHE_KEY, 0) + 1 + cache.set(DEFAULT_CACHE_KEY, val) + + initialise() + + task = RecurringTask.objects.get( + task_identifier="test_unit_task_processor_processor._dummy_recurring_task", + ) + + # When + first_task_runs = run_recurring_tasks() + + # run the process again before the task is scheduled to run again to ensure + # that tasks are unlocked when they are picked up by the task processor but + # not executed. + no_task_runs = run_recurring_tasks() + + time.sleep(0.3) + + second_task_runs = run_recurring_tasks() + + # Then + assert len(first_task_runs) == 1 + assert len(no_task_runs) == 0 + assert len(second_task_runs) == 1 + + # we should still only have 2 organisations, despite executing the + # `run_recurring_tasks` function 3 times. 
+ assert cache.get(DEFAULT_CACHE_KEY) == 2 + + all_task_runs = first_task_runs + second_task_runs + assert len(all_task_runs) == RecurringTaskRun.objects.filter(task=task).count() == 2 + for task_run in all_task_runs: + assert task_run.result == TaskResult.SUCCESS.value + assert task_run.started_at + assert task_run.finished_at + assert task_run.error_details is None + + +def test_run_recurring_tasks_only_executes_tasks_after_interval_set_by_run_every( + db: None, + run_by_processor: None, +) -> None: + # Given + @register_recurring_task(run_every=timedelta(milliseconds=200)) + def _dummy_recurring_task() -> None: + val = cache.get(DEFAULT_CACHE_KEY, 0) + 1 + cache.set(DEFAULT_CACHE_KEY, val) + + initialise() + + task = RecurringTask.objects.get( + task_identifier="test_unit_task_processor_processor._dummy_recurring_task", + ) + + # When - we call run_recurring_tasks twice + run_recurring_tasks() + run_recurring_tasks() + + # Then - we expect the task to have been run once + + assert cache.get(DEFAULT_CACHE_KEY) == 1 + + assert RecurringTaskRun.objects.filter(task=task).count() == 1 + + +def test_run_recurring_tasks_does_nothing_if_unregistered_task_is_new( + db: None, + run_by_processor: None, + caplog: pytest.LogCaptureFixture, +) -> None: + # Given + task_processor_logger = logging.getLogger("task_processor") + task_processor_logger.propagate = True + + task_identifier = "test_unit_task_processor_processor._a_task" + + @register_recurring_task(run_every=timedelta(milliseconds=100)) + def _a_task() -> None: + pass + + initialise() + + # now - remove the task from the registry + from task_processor.task_registry import registered_tasks + + registered_tasks.pop(task_identifier) + + # When + task_runs = run_recurring_tasks() + + # Then + assert len(task_runs) == 0 + assert RecurringTask.objects.filter(task_identifier=task_identifier).exists() + + +def test_run_recurring_tasks_deletes_the_task_if_unregistered_task_is_old( + db: None, + run_by_processor: None, + mocker: MockerFixture, +) -> None: + # Given + task_processor_logger = logging.getLogger("task_processor") + task_processor_logger.propagate = True + + task_identifier = "test_unit_task_processor_processor._a_task" + + with freeze_time(timezone.now() - UNREGISTERED_RECURRING_TASK_GRACE_PERIOD): + + @register_recurring_task(run_every=timedelta(milliseconds=100)) + def _a_task() -> None: + pass + + initialise() + + # now - remove the task from the registry + registered_tasks.pop(task_identifier) + + # When + task_runs = run_recurring_tasks() + + # Then + assert len(task_runs) == 0 + assert ( + RecurringTask.objects.filter(task_identifier=task_identifier).exists() is False + ) + + +def test_run_task_runs_task_and_creates_task_run_object_when_failure( + raise_exception_task: TaskHandler[[str]], + get_task_processor_caplog: "GetTaskProcessorCaplog", +) -> None: + # Given + caplog = get_task_processor_caplog(logging.DEBUG) + + msg = "Error!" 
+ task = Task.create( + raise_exception_task.task_identifier, args=(msg,), scheduled_for=timezone.now() + ) + task.save() + + # When + task_runs = run_tasks() + + # Then + assert len(task_runs) == TaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.FAILURE.value + assert task_run.started_at + assert task_run.finished_at is None + assert task_run.error_details is not None + + task.refresh_from_db() + assert not task.completed + + expected_log_records = [ + ("DEBUG", "Running 1 task(s)"), + ( + "DEBUG", + f"Running task {task.task_identifier} id={task.id} args={task.args} kwargs={task.kwargs}", + ), + ( + "ERROR", + f"Failed to execute task '{task.task_identifier}', with id {task.id}. Exception: {msg}", + ), + ("DEBUG", "Finished running 1 task(s)"), + ] + + assert expected_log_records == [ + (record.levelname, record.message) for record in caplog.records + ] + + +def test_run_task_runs_failed_task_again( + raise_exception_task: TaskHandler[[str]], +) -> None: + # Given + task = Task.create( + raise_exception_task.task_identifier, scheduled_for=timezone.now() + ) + task.save() + + # When + first_task_runs = run_tasks() + + # Now, let's run the task again + second_task_runs = run_tasks() + + # Then + task_runs = first_task_runs + second_task_runs + assert len(task_runs) == TaskRun.objects.filter(task=task).count() == 2 + + # Then + for task_run in task_runs: + assert task_run.result == TaskResult.FAILURE.value + assert task_run.started_at + assert task_run.finished_at is None + assert task_run.error_details is not None + + task.refresh_from_db() + assert task.completed is False + assert task.is_locked is False + + +def test_run_recurring_task_runs_task_and_creates_recurring_task_run_object_when_failure( + db: None, + run_by_processor: None, +) -> None: + # Given + task_identifier = "test_unit_task_processor_processor._raise_exception" + + @register_recurring_task(run_every=timedelta(seconds=1)) + def _raise_exception(organisation_name: str) -> None: + raise RuntimeError("test exception") + + initialise() + + task = RecurringTask.objects.get(task_identifier=task_identifier) + + # When + task_runs = run_recurring_tasks() + + # Then + assert len(task_runs) == RecurringTaskRun.objects.filter(task=task).count() == 1 + task_run = task_runs[0] + assert task_run.result == TaskResult.FAILURE.value + assert task_run.started_at + assert task_run.finished_at is None + assert task_run.error_details is not None + + +def test_run_task_does_nothing_if_no_tasks(db: None) -> None: + # Given - no tasks + # When + result = run_tasks() + # Then + assert result == [] + assert not TaskRun.objects.exists() + + +@pytest.mark.django_db(transaction=True) +def test_run_task_runs_tasks_in_correct_priority( + dummy_task: TaskHandler[[str, str]], +) -> None: + # Given + # 2 tasks + task_1 = Task.create( + dummy_task.task_identifier, + scheduled_for=timezone.now(), + args=("task 1 organisation",), + priority=TaskPriority.HIGH, + ) + task_1.save() + + task_2 = Task.create( + dummy_task.task_identifier, + scheduled_for=timezone.now(), + args=("task 2 organisation",), + priority=TaskPriority.HIGH, + ) + task_2.save() + + task_3 = Task.create( + dummy_task.task_identifier, + scheduled_for=timezone.now(), + args=("task 3 organisation",), + priority=TaskPriority.HIGHEST, + ) + task_3.save() + + # When + task_runs_1 = run_tasks() + task_runs_2 = run_tasks() + task_runs_3 = run_tasks() + + # Then + assert task_runs_1[0].task == task_3 + assert task_runs_2[0].task == task_1 
+ assert task_runs_3[0].task == task_2 + + +@pytest.mark.django_db(transaction=True) +def test_run_tasks_skips_locked_tasks( + dummy_task: TaskHandler[[str, str]], + sleep_task: TaskHandler[[int]], +) -> None: + """ + This test verifies that tasks are locked while being executed, and hence + new task runners are not able to pick up 'in progress' tasks. + """ + # Given + # 2 tasks + # One which is configured to just sleep for 3 seconds, to simulate a task + # being held for a short period of time + task_1 = Task.create( + sleep_task.task_identifier, scheduled_for=timezone.now(), args=(3,) + ) + task_1.save() + + # and another which should create an organisation + task_2 = Task.create( + dummy_task.task_identifier, + scheduled_for=timezone.now(), + args=("task 2 organisation",), + ) + task_2.save() + + # When + # we spawn a new thread to run the first task (configured to just sleep) + task_runner_thread = Thread(target=run_tasks) + task_runner_thread.start() + + # and subsequently attempt to run another task in the main thread + time.sleep(1) # wait for the thread to start and hold the task + task_runs = run_tasks() + + # Then + # the second task is run while the 1st task is held + assert task_runs[0].task == task_2 + + task_runner_thread.join() + + +def test_run_more_than_one_task(dummy_task: TaskHandler[[str, str]]) -> None: + # Given + num_tasks = 5 + + tasks = [] + for _ in range(num_tasks): + organisation_name = f"test-org-{uuid.uuid4()}" + tasks.append( + Task.create( + dummy_task.task_identifier, + scheduled_for=timezone.now(), + args=(organisation_name,), + ) + ) + Task.objects.bulk_create(tasks) + + # When + task_runs = run_tasks(5) + + # Then + assert len(task_runs) == num_tasks + + for task_run in task_runs: + assert task_run.result == TaskResult.SUCCESS.value + assert task_run.started_at + assert task_run.finished_at + assert task_run.error_details is None + + for task in tasks: + task.refresh_from_db() + assert task.completed + + +def test_recurring_tasks_are_unlocked_if_picked_up_but_not_executed( + db: None, + run_by_processor: None, +) -> None: + # Given + @register_recurring_task(run_every=timedelta(days=1)) + def my_task() -> None: + pass + + initialise() + + recurring_task = RecurringTask.objects.get( + task_identifier="test_unit_task_processor_processor.my_task" + ) + + # mimic the task having already been run so that it is next picked up, + # but not executed + now = timezone.now() + one_minute_ago = now - timedelta(minutes=1) + RecurringTaskRun.objects.create( + task=recurring_task, + started_at=one_minute_ago, + finished_at=now, + result=TaskResult.SUCCESS.name, + ) + + # When + run_recurring_tasks() + + # Then + recurring_task.refresh_from_db() + assert recurring_task.is_locked is False diff --git a/tests/unit/task_processor/test_unit_task_processor_tasks.py b/tests/unit/task_processor/test_unit_task_processor_tasks.py new file mode 100644 index 0000000..cfd8e19 --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_tasks.py @@ -0,0 +1,193 @@ +from datetime import timedelta + +from django.utils import timezone +from pytest_django.fixtures import DjangoAssertNumQueries, SettingsWrapper + +from task_processor.models import RecurringTask, RecurringTaskRun, Task +from task_processor.tasks import ( + clean_up_old_recurring_task_runs, + clean_up_old_tasks, +) + +now = timezone.now() +three_days_ago = now - timedelta(days=3) +one_day_ago = now - timedelta(days=1) +one_hour_from_now = now + timedelta(hours=1) +sixty_days_ago = now - timedelta(days=60) + + +def 
test_clean_up_old_tasks_does_nothing_when_no_tasks(db: None) -> None:
+    # Given
+    assert Task.objects.count() == 0
+
+    # When
+    clean_up_old_tasks()
+
+    # Then
+    assert Task.objects.count() == 0
+
+
+def test_clean_up_old_recurring_task_runs_does_nothing_when_no_runs(db: None) -> None:
+    # Given
+    assert RecurringTaskRun.objects.count() == 0
+
+    # When
+    clean_up_old_recurring_task_runs()
+
+    # Then
+    assert RecurringTaskRun.objects.count() == 0
+
+
+def test_clean_up_old_tasks(
+    settings: SettingsWrapper,
+    django_assert_num_queries: DjangoAssertNumQueries,
+    db: None,
+) -> None:
+    # Given
+    settings.TASK_DELETE_RETENTION_DAYS = 2
+    settings.TASK_DELETE_BATCH_SIZE = 1
+
+    # 2 completed tasks that were scheduled before retention period
+    for _ in range(2):
+        Task.objects.create(
+            task_identifier="some.identifier",
+            scheduled_for=three_days_ago,
+            completed=True,
+        )
+
+    # a task that has been completed but is within the retention period
+    task_in_retention_period = Task.objects.create(
+        task_identifier="some.identifier", scheduled_for=one_day_ago, completed=True
+    )
+
+    # and a task that has yet to be completed
+    future_task = Task.objects.create(
+        task_identifier="some.identifier", scheduled_for=one_hour_from_now
+    )
+
+    # and a task that failed
+    failed_task = Task.objects.create(
+        task_identifier="some.identifier", scheduled_for=three_days_ago, num_failures=3
+    )
+
+    # When
+    with django_assert_num_queries(7):
+        # We expect 7 queries to be run here since we have set the delete batch size to 1 and there are 2
+        # tasks we expect it to delete. Therefore, we have 2 loops, each consisting of 3 queries:
+        # 1. Grab the ids of any matching tasks
+        # 2. Delete all TaskRun objects for those task_id values
+        # 3. Delete all Task objects for those ids
+        #
+        # The final (7th) query is checking if any tasks match the delete filter (which returns false). 
+ clean_up_old_tasks() + + # Then + assert list(Task.objects.all()) == [ + task_in_retention_period, + future_task, + failed_task, + ] + + +def test_clean_up_old_recurring_task_runs( + settings: SettingsWrapper, + django_assert_num_queries: DjangoAssertNumQueries, + db: None, +) -> None: + # Given + settings.RECURRING_TASK_RUN_RETENTION_DAYS = 2 + settings.ENABLE_CLEAN_UP_OLD_TASKS = True + + recurring_task = RecurringTask.objects.create( + task_identifier="some_identifier", run_every=timedelta(seconds=1) + ) + + # 2 task runs finished before retention period + for _ in range(2): + RecurringTaskRun.objects.create( + started_at=three_days_ago, + task=recurring_task, + finished_at=three_days_ago, + ) + + # a task run that is within the retention period + task_in_retention_period = RecurringTaskRun.objects.create( + task=recurring_task, + started_at=one_day_ago, + finished_at=one_day_ago, + ) + + # When + with django_assert_num_queries(1): + clean_up_old_recurring_task_runs() + + # Then + assert list(RecurringTaskRun.objects.all()) == [task_in_retention_period] + + +def test_clean_up_old_tasks_include_failed_tasks( + settings: SettingsWrapper, + django_assert_num_queries: DjangoAssertNumQueries, + db: None, +) -> None: + # Given + settings.TASK_DELETE_RETENTION_DAYS = 2 + settings.TASK_DELETE_INCLUDE_FAILED_TASKS = True + + # a task that failed + Task.objects.create( + task_identifier="some.identifier", scheduled_for=three_days_ago, num_failures=3 + ) + + # When + clean_up_old_tasks() + + # Then + assert not Task.objects.exists() + + +def test_clean_up_old_tasks_does_not_run_if_disabled( + settings: SettingsWrapper, + django_assert_num_queries: DjangoAssertNumQueries, + db: None, +) -> None: + # Given + settings.ENABLE_CLEAN_UP_OLD_TASKS = False + + task = Task.objects.create( + task_identifier="some.identifier", scheduled_for=sixty_days_ago + ) + + # When + with django_assert_num_queries(0): + clean_up_old_tasks() + + # Then + assert Task.objects.filter(id=task.id).exists() + + +def test_clean_up_old_recurring_task_runs_does_not_run_if_disabled( + settings: SettingsWrapper, + django_assert_num_queries: DjangoAssertNumQueries, + db: None, +) -> None: + # Given + settings.RECURRING_TASK_RUN_RETENTION_DAYS = 2 + settings.ENABLE_CLEAN_UP_OLD_TASKS = False + + recurring_task = RecurringTask.objects.create( + task_identifier="some_identifier", run_every=timedelta(seconds=1) + ) + + RecurringTaskRun.objects.create( + started_at=three_days_ago, + task=recurring_task, + finished_at=three_days_ago, + ) + + # When + with django_assert_num_queries(0): + clean_up_old_recurring_task_runs() + + # Then + assert RecurringTaskRun.objects.exists() diff --git a/tests/unit/task_processor/test_unit_task_processor_threads.py b/tests/unit/task_processor/test_unit_task_processor_threads.py new file mode 100644 index 0000000..3647d50 --- /dev/null +++ b/tests/unit/task_processor/test_unit_task_processor_threads.py @@ -0,0 +1,46 @@ +import logging +import typing +from typing import Type + +import pytest +from django.db import DatabaseError +from pytest_mock import MockerFixture + +from task_processor.threads import TaskRunner + +if typing.TYPE_CHECKING: + # This import breaks private-package-test workflow in core + from tests.unit.task_processor.conftest import GetTaskProcessorCaplog + + +@pytest.mark.parametrize( + "exception_class, exception_message", + [(DatabaseError, "Database error"), (Exception, "Generic error")], +) +def test_task_runner_is_resilient_to_errors( + db: None, + mocker: MockerFixture, + 
get_task_processor_caplog: "GetTaskProcessorCaplog", + exception_class: Type[Exception], + exception_message: str, +) -> None: + # Given + caplog = get_task_processor_caplog(logging.DEBUG) + + task_runner = TaskRunner() + mocker.patch( + "task_processor.threads.run_tasks", + side_effect=exception_class(exception_message), + ) + + # When + task_runner.run_iteration() + + # Then + assert len(caplog.records) == 1 + + assert caplog.records[0].levelno == logging.ERROR + assert ( + caplog.records[0].message + == f"Received error retrieving tasks: {exception_message}." + ) From 628b646bbf971f9cc351b9d487bcda0c55e18029 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 00:29:28 +0000 Subject: [PATCH 02/14] yes root --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 80629a6..c621aea 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ install-poetry: .PHONY: install-packages install-packages: - poetry install --no-root $(opts) + poetry install $(opts) .PHONY: install install: install-pip install-poetry install-packages From a44aad2f273a1d4a296ec8461d902c511749365d Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 00:38:02 +0000 Subject: [PATCH 03/14] prefer CommandError --- src/common/core/management/commands/waitfordb.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/common/core/management/commands/waitfordb.py b/src/common/core/management/commands/waitfordb.py index 91c3746..1635a89 100644 --- a/src/common/core/management/commands/waitfordb.py +++ b/src/common/core/management/commands/waitfordb.py @@ -3,7 +3,7 @@ from argparse import ArgumentParser from typing import Any -from django.core.management import BaseCommand +from django.core.management import BaseCommand, CommandError from django.db import OperationalError, connections from django.db.migrations.executor import MigrationExecutor @@ -54,7 +54,7 @@ def handle( if time.monotonic() - start > wait_for: msg = f"Failed to connect to DB within {wait_for} seconds." logger.error(msg) - exit(msg) + raise CommandError(msg) conn = connections.create_connection(database) try: @@ -75,7 +75,7 @@ def handle( if time.monotonic() - start > wait_for: msg = f"Didn't detect applied migrations for {wait_for} seconds." logger.error(msg) - exit(msg) + raise CommandError(msg) conn = connections[database] executor = MigrationExecutor(conn) From 95e6d081f9fda31d22294cac30686bb37c7d9188 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 00:38:37 +0000 Subject: [PATCH 04/14] fix typo --- src/task_processor/decorators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/task_processor/decorators.py b/src/task_processor/decorators.py index 8e865b9..8b74078 100644 --- a/src/task_processor/decorators.py +++ b/src/task_processor/decorators.py @@ -144,7 +144,7 @@ def register_task_handler( # noqa: C901 the task processor Defaults to `None` (infinite). :param TaskPriority priority: task priority. :param bool transaction_on_commit: (`SEPARATE_THREAD` task run method only) - Whether to wrap the task call in `transanction.on_commit`. Defaults to `True`. + Whether to wrap the task call in `transaction.on_commit`. Defaults to `True`. We need this for the task to be able to access data committed with the current transaction. If the task is invoked outside of a transaction, it will start immediately. 
From 672cfaf0ffa61eaeb9ab19cbd72acbc7d0e4a65a Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 13:04:58 +0000 Subject: [PATCH 05/14] add changelog to project.urls --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 84aaa01..c34e06d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,7 @@ readme = "README.md" dynamic = ["classifiers"] [project.urls] +Changelog = "https://github.com/flagsmith/flagsmith-common/blob/main/CHANGELOG.md" Download = "https://github.com/flagsmith/flagsmith-common/releases" Homepage = "https://flagsmith.com" Issues = "https://github.com/flagsmith/flagsmith-common/issues" From e382c621c0e45488f969ee562b522ea7357df2ac Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 13:05:21 +0000 Subject: [PATCH 06/14] make sure the entrypoint works --- settings/dev.py | 3 +++ src/common/core/main.py | 40 ++++++++++++++++++++++++++++------------ src/common/core/utils.py | 3 +++ 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/settings/dev.py b/settings/dev.py index e9a5a64..cdd1655 100644 --- a/settings/dev.py +++ b/settings/dev.py @@ -47,3 +47,6 @@ TASK_DELETE_RUN_EVERY = timedelta(days=1) TASK_DELETE_RUN_TIME = time(5, 0, 0) TASK_RUN_METHOD = TaskRunMethod.TASK_PROCESSOR + +# Avoid models.W042 warnings +DEFAULT_AUTO_FIELD = "django.db.models.AutoField" diff --git a/src/common/core/main.py b/src/common/core/main.py index 8d53dc8..695a204 100644 --- a/src/common/core/main.py +++ b/src/common/core/main.py @@ -8,27 +8,25 @@ logger = logging.getLogger(__name__) -def main() -> None: +def ensure_cli_env() -> None: """ - The main entry point to the Flagsmith application. - - An equivalent to Django's `manage.py` script, this module is used to run management commands. - - It's installed as the `flagsmith` command. - - Everything that needs to be run before Django is started should be done here. + Set up the environment for the main entry point of the application. + """ + # TODO @khvn26 Move logging setup to here - The end goal is to eventually replace Core API's `run-docker.sh` with this. + # Currently we don't install Flagsmith modues as a package, so we need to add + # $CWD to the Python path to be able to import them + sys.path.append(os.getcwd()) - Usage: - `flagsmith [options]` - """ + # TODO @khvn26 We should find a better way to pre-set the Django settings module + # without resorting to it being set outside of the application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev") # Set up Prometheus' multiprocess mode if "PROMETHEUS_MULTIPROC_DIR" not in os.environ: prometheus_multiproc_dir = tempfile.TemporaryDirectory( prefix="prometheus_multiproc", + delete=False, ) logger.info( "Created %s for Prometheus multi-process mode", @@ -36,5 +34,23 @@ def main() -> None: ) os.environ["PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir.name + +def main() -> None: + """ + The main entry point to the Flagsmith application. + + An equivalent to Django's `manage.py` script, this module is used to run management commands. + + It's installed as the `flagsmith` command. + + Everything that needs to be run before Django is started should be done here. + + The end goal is to eventually replace Core API's `run-docker.sh` with this. 
+ + Usage: + `flagsmith [options]` + """ + ensure_cli_env() + # Run Django execute_from_command_line(sys.argv) diff --git a/src/common/core/utils.py b/src/common/core/utils.py index 4b785b4..2330e0a 100644 --- a/src/common/core/utils.py +++ b/src/common/core/utils.py @@ -1,4 +1,5 @@ import json +import logging import pathlib from functools import lru_cache from typing import NotRequired, TypedDict @@ -11,6 +12,8 @@ UNKNOWN = "unknown" VERSIONS_INFO_FILE_LOCATION = ".versions.json" +logger = logging.getLogger(__name__) + class SelfHostedData(TypedDict): has_users: bool From 3a52c2877c77d8108c5cdedf280d9f41ccd35fc0 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 13:16:11 +0000 Subject: [PATCH 07/14] fix tempdir on 3.11 --- src/common/core/main.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/src/common/core/main.py b/src/common/core/main.py index 695a204..69054d8 100644 --- a/src/common/core/main.py +++ b/src/common/core/main.py @@ -1,17 +1,22 @@ +import contextlib import logging import os import sys import tempfile +import typing from django.core.management import execute_from_command_line logger = logging.getLogger(__name__) -def ensure_cli_env() -> None: +@contextlib.contextmanager +def ensure_cli_env() -> typing.Generator[None, None, None]: """ Set up the environment for the main entry point of the application. """ + ctx = contextlib.ExitStack() + # TODO @khvn26 Move logging setup to here # Currently we don't install Flagsmith modues as a package, so we need to add @@ -24,15 +29,20 @@ def ensure_cli_env() -> None: # Set up Prometheus' multiprocess mode if "PROMETHEUS_MULTIPROC_DIR" not in os.environ: - prometheus_multiproc_dir = tempfile.TemporaryDirectory( - prefix="prometheus_multiproc", - delete=False, + prometheus_multiproc_dir_name = ctx.enter_context( + tempfile.TemporaryDirectory( + prefix="prometheus_multiproc", + ) ) + logger.info( "Created %s for Prometheus multi-process mode", - prometheus_multiproc_dir.name, + prometheus_multiproc_dir_name, ) - os.environ["PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir.name + os.environ["PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir_name + + with ctx: + yield def main() -> None: @@ -50,7 +60,6 @@ def main() -> None: Usage: `flagsmith [options]` """ - ensure_cli_env() - - # Run Django - execute_from_command_line(sys.argv) + with ensure_cli_env(): + # Run Django + execute_from_command_line(sys.argv) From c0ae170af89ae268e03bde1b273b6e149dc95ffa Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 13:44:17 +0000 Subject: [PATCH 08/14] take care of `RUN_BY_PROCESSOR` --- src/common/core/main.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/common/core/main.py b/src/common/core/main.py index 69054d8..df2ffdb 100644 --- a/src/common/core/main.py +++ b/src/common/core/main.py @@ -41,6 +41,10 @@ def ensure_cli_env() -> typing.Generator[None, None, None]: ) os.environ["PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir_name + if "task-processor" in sys.argv: + # A bit hacky way to signal we're not running the API + os.environ["RUN_BY_PROCESSOR"] = "true" + with ctx: yield From 8e800cf2e38ded636d1a8f3217fa9faf6b968b98 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 16:06:54 +0000 Subject: [PATCH 09/14] remove unused logger --- src/common/core/utils.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/common/core/utils.py b/src/common/core/utils.py index 2330e0a..4b785b4 100644 --- a/src/common/core/utils.py +++ 
b/src/common/core/utils.py @@ -1,5 +1,4 @@ import json -import logging import pathlib from functools import lru_cache from typing import NotRequired, TypedDict @@ -12,8 +11,6 @@ UNKNOWN = "unknown" VERSIONS_INFO_FILE_LOCATION = ".versions.json" -logger = logging.getLogger(__name__) - class SelfHostedData(TypedDict): has_users: bool From 6d027c0d9458ca29a425edec3079f8880a4471d7 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 16:09:51 +0000 Subject: [PATCH 10/14] expand `ensure_cli_env` docstring --- src/common/core/main.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/common/core/main.py b/src/common/core/main.py index df2ffdb..d6cd94e 100644 --- a/src/common/core/main.py +++ b/src/common/core/main.py @@ -13,7 +13,18 @@ @contextlib.contextmanager def ensure_cli_env() -> typing.Generator[None, None, None]: """ - Set up the environment for the main entry point of the application. + Set up the environment for the main entry point of the application + and clean up after it's done. + + Add environment-related code that needs to happen before and after Django is involved + to here. + + Use as a context manager, e.g.: + + ```python + with ensure_cli_env(): + main() + ``` """ ctx = contextlib.ExitStack() From af56f27491536c23afc011a3b68b6854867dceec Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 16:10:30 +0000 Subject: [PATCH 11/14] fix typo Co-authored-by: Matthew Elwell --- src/common/core/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/core/main.py b/src/common/core/main.py index d6cd94e..6bae997 100644 --- a/src/common/core/main.py +++ b/src/common/core/main.py @@ -30,7 +30,7 @@ def ensure_cli_env() -> typing.Generator[None, None, None]: # TODO @khvn26 Move logging setup to here - # Currently we don't install Flagsmith modues as a package, so we need to add + # Currently we don't install Flagsmith modules as a package, so we need to add # $CWD to the Python path to be able to import them sys.path.append(os.getcwd()) From 37af8b1fc5a09e7463ababe9e83ddb800c475162 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 17:07:00 +0000 Subject: [PATCH 12/14] fix wording Co-authored-by: Matthew Elwell --- src/common/core/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/core/main.py b/src/common/core/main.py index 6bae997..3f5749a 100644 --- a/src/common/core/main.py +++ b/src/common/core/main.py @@ -53,7 +53,7 @@ def ensure_cli_env() -> typing.Generator[None, None, None]: os.environ["PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir_name if "task-processor" in sys.argv: - # A bit hacky way to signal we're not running the API + # A hacky way to signal we're not running the API os.environ["RUN_BY_PROCESSOR"] = "true" with ctx: From 2b051eff19daa22bde2d30e1c1055925f7dc0ea5 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 17:09:47 +0000 Subject: [PATCH 13/14] improve code organisation --- src/common/migrations/__init__.py | 0 src/{task_processor => common}/migrations/helpers/__init__.py | 2 +- .../migrations/helpers/postgres_helpers.py | 0 src/task_processor/migrations/0004_recreate_task_indexes.py | 2 +- .../migrations/0005_update_conditional_index_conditions.py | 2 +- .../migrations/0008_add_get_task_to_process_function.py | 2 +- .../migrations/0011_add_priority_to_get_tasks_to_process.py | 2 +- .../migrations/0012_add_locked_at_and_timeout.py | 3 ++- 8 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 
src/common/migrations/__init__.py rename src/{task_processor => common}/migrations/helpers/__init__.py (76%) rename src/{task_processor => common}/migrations/helpers/postgres_helpers.py (100%) diff --git a/src/common/migrations/__init__.py b/src/common/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/task_processor/migrations/helpers/__init__.py b/src/common/migrations/helpers/__init__.py similarity index 76% rename from src/task_processor/migrations/helpers/__init__.py rename to src/common/migrations/helpers/__init__.py index 4fb432d..f9b2450 100644 --- a/src/task_processor/migrations/helpers/__init__.py +++ b/src/common/migrations/helpers/__init__.py @@ -4,6 +4,6 @@ and simplified the imports by defining the __all__ attribute. """ -from task_processor.migrations.helpers.postgres_helpers import PostgresOnlyRunSQL +from common.migrations.helpers.postgres_helpers import PostgresOnlyRunSQL __all__ = ["PostgresOnlyRunSQL"] diff --git a/src/task_processor/migrations/helpers/postgres_helpers.py b/src/common/migrations/helpers/postgres_helpers.py similarity index 100% rename from src/task_processor/migrations/helpers/postgres_helpers.py rename to src/common/migrations/helpers/postgres_helpers.py diff --git a/src/task_processor/migrations/0004_recreate_task_indexes.py b/src/task_processor/migrations/0004_recreate_task_indexes.py index 4919518..2a7acf5 100644 --- a/src/task_processor/migrations/0004_recreate_task_indexes.py +++ b/src/task_processor/migrations/0004_recreate_task_indexes.py @@ -2,7 +2,7 @@ from django.db import migrations, models -from task_processor.migrations.helpers import PostgresOnlyRunSQL +from common.migrations.helpers import PostgresOnlyRunSQL class Migration(migrations.Migration): diff --git a/src/task_processor/migrations/0005_update_conditional_index_conditions.py b/src/task_processor/migrations/0005_update_conditional_index_conditions.py index 2b569b7..e601fc2 100644 --- a/src/task_processor/migrations/0005_update_conditional_index_conditions.py +++ b/src/task_processor/migrations/0005_update_conditional_index_conditions.py @@ -2,7 +2,7 @@ from django.db import migrations, models -from task_processor.migrations.helpers import PostgresOnlyRunSQL +from common.migrations.helpers import PostgresOnlyRunSQL class Migration(migrations.Migration): diff --git a/src/task_processor/migrations/0008_add_get_task_to_process_function.py b/src/task_processor/migrations/0008_add_get_task_to_process_function.py index 21cca62..3c5c8af 100644 --- a/src/task_processor/migrations/0008_add_get_task_to_process_function.py +++ b/src/task_processor/migrations/0008_add_get_task_to_process_function.py @@ -2,7 +2,7 @@ from django.db import migrations -from task_processor.migrations.helpers import PostgresOnlyRunSQL +from common.migrations.helpers import PostgresOnlyRunSQL import os diff --git a/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py b/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py index 2270d7f..d4a6efc 100644 --- a/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py +++ b/src/task_processor/migrations/0011_add_priority_to_get_tasks_to_process.py @@ -2,7 +2,7 @@ from django.db import migrations -from task_processor.migrations.helpers import PostgresOnlyRunSQL +from common.migrations.helpers import PostgresOnlyRunSQL import os diff --git a/src/task_processor/migrations/0012_add_locked_at_and_timeout.py b/src/task_processor/migrations/0012_add_locked_at_and_timeout.py index 
866d40e..f5c079b 100644 --- a/src/task_processor/migrations/0012_add_locked_at_and_timeout.py +++ b/src/task_processor/migrations/0012_add_locked_at_and_timeout.py @@ -1,10 +1,11 @@ # Generated by Django 3.2.23 on 2025-01-06 04:51 -from task_processor.migrations.helpers import PostgresOnlyRunSQL import datetime from django.db import migrations, models import os +from common.migrations.helpers import PostgresOnlyRunSQL + class Migration(migrations.Migration): From 885e23b460f68881d78a82afc948af1554209727 Mon Sep 17 00:00:00 2001 From: Kim Gustyr Date: Thu, 27 Mar 2025 17:35:17 +0000 Subject: [PATCH 14/14] down means down --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c621aea..11a565f 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ docker-up: .PHONY: docker-down docker-down: - docker compose stop + docker compose down .PHONY: test test: