diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 056be1236..f74f4d831 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -71,6 +71,17 @@ updates: - dependency-name: "@types/node" update-types: ["version-update:semver-major"] + - package-ecosystem: "gomod" + directory: "/lambdas/goose-migrator-lambda/src" + schedule: + interval: "daily" + cooldown: + default-days: 7 + groups: + go-dependencies: + patterns: + - "*" + - package-ecosystem: "pre-commit" directory: "/" schedule: diff --git a/.github/workflows/stage-2-test.yaml b/.github/workflows/stage-2-test.yaml index f54cf9580..0ce76ecf1 100644 --- a/.github/workflows/stage-2-test.yaml +++ b/.github/workflows/stage-2-test.yaml @@ -61,9 +61,21 @@ jobs: test-name: "Jest Lambda Tests" report-title: "Lambdas Coverage Report" + test-goose-migrations: + name: "Test Goose DB Migrations" + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + - name: "Initialize mise" + uses: ./.github/actions/init-mise + - name: "Run Goose Migration Tests" + run: mise run test-migrations + perform-static-analysis: name: "Perform static analysis" - needs: [test-unit-ui, test-unit-lambda] + needs: [test-unit-ui, test-unit-lambda, test-goose-migrations] runs-on: ubuntu-latest permissions: id-token: write diff --git a/.gitignore b/.gitignore index c629dcbad..330912a60 100644 --- a/.gitignore +++ b/.gitignore @@ -174,3 +174,8 @@ junit.xml # Test configuration with personal credentials tests/users.ts + +# Goose migrator build artifacts +lambdas/goose-migrator-lambda/goose-migrator-lambda.zip +lambdas/goose-migrator-lambda/src/bootstrap +.migrator-build-cache/ diff --git a/.gitleaksignore b/.gitleaksignore index cceb449a3..a271ef7f3 100644 --- a/.gitleaksignore +++ b/.gitleaksignore @@ -1,3 +1,12 @@ # SEE: https://github.com/gitleaks/gitleaks/blob/master/README.md#gitleaksignore 
cd9c0efec38c5d63053dd865e5d4e207c0760d91:docs/guides/Perform_static_analysis.md:generic-api-key:37 + +# False positives: AWS Secrets Manager path names, not actual secrets + +lambdas/goose-migrator-lambda/migrations/000002_seed_home_test_data.sql:generic-api-key:18 +lambdas/goose-migrator-lambda/migrations/000002_seed_home_test_data.sql:generic-api-key:42 + +# False positives: OAuth client_id values (public identifiers, not secrets) +lambdas/goose-migrator-lambda/migrations/000007_supplier_data_update.sql:generic-api-key:3 +lambdas/goose-migrator-lambda/migrations/000007_supplier_data_update.sql:generic-api-key:8 diff --git a/.mise.toml b/.mise.toml index 179fa8d09..e5d770099 100644 --- a/.mise.toml +++ b/.mise.toml @@ -62,6 +62,12 @@ node = "24.14.1" # https://github.com/pnpm/pnpm/releases "npm:pnpm" = "10.33.0" +# https://go.dev/dl/ +go = "1.26.2" + +# https://github.com/pressly/goose/releases +"go:github.com/pressly/goose/v3/cmd/goose" = "3.27.0" + [tasks.pre-commit] description = "Run pre-commit checks on all files" run = "pre-commit run --all-files --show-diff-on-failure --color=always" @@ -73,3 +79,11 @@ run = "pre-commit run --from-ref origin/main --to-ref HEAD --show-diff-on-failur [tasks.install-pnpm] description = "Install pnpm dependencies" run = "pnpm install --frozen-lockfile" + +[tasks.test-migrations] +description = "Test Goose DB migrations against local PostgreSQL in Docker" +run = "./lambdas/goose-migrator-lambda/scripts/test-migrations.sh" + +[tasks.test-migrations-keep] +description = "Test migrations and keep PostgreSQL container running" +run = "KEEP_CONTAINER=true ./lambdas/goose-migrator-lambda/scripts/test-migrations.sh" diff --git a/README.md b/README.md index db23a2fcf..a365774a6 100644 --- a/README.md +++ b/README.md @@ -122,7 +122,7 @@ After running `pnpm start`, use targeted commands instead of restarting everythi pnpm run local:deploy ``` -- **Database schema or seed changes** (rerun DB migration container, including goose 
migrations): +- **Database schema or seed changes** (rerun goose migrations and seed data): ```shell pnpm run local:service:db:migrate @@ -170,7 +170,7 @@ After running `pnpm start`, use targeted commands instead of restarting everythi pnpm run local:frontend:restart ``` -- **Restart backend containers only** (Postgres, LocalStack, WireMock, db-migrate): +- **Restart backend containers only** (Postgres, LocalStack, WireMock): ```shell pnpm run local:compose -- stop postgres-db localstack wiremock diff --git a/database/03-seed-hometest-data.sql b/database/03-seed-hometest-data.sql index 7cdf61fe9..08cde333f 100644 --- a/database/03-seed-hometest-data.sql +++ b/database/03-seed-hometest-data.sql @@ -8,64 +8,33 @@ SET search_path TO hometest; -INSERT INTO supplier ( - supplier_id, - supplier_name, - service_url, - website_url, - client_secret_name, - client_id, - oauth_token_path, - order_path, - oauth_scope, - results_path -) -VALUES ( - 'c1a2b3c4-1234-4def-8abc-123456789abc', - 'Preventx', - 'http://wiremock:8080', - 'https://www.preventx.com/', - 'test_supplier_client_secret', - 'preventx-client-id', - '/oauth/token', - '/order', - 'orders results', - '/results' -) -ON CONFLICT (supplier_id) DO NOTHING; - -INSERT INTO supplier ( - supplier_id, - supplier_name, - service_url, - website_url, - client_secret_name, - client_id, - oauth_token_path, - order_path, - oauth_scope, - results_path -) -VALUES ( - 'd2b3c4d5-2345-4abc-8def-23456789abcd', - 'SH:24', - 'http://wiremock:8080', - 'https://sh24.org.uk/', - 'test_supplier_client_secret', - 'sh24-client-id', - '/oauth/token', - '/order', - 'order results', - '/results' -) -ON CONFLICT (supplier_id) DO NOTHING; +-- Override dev/staging supplier credentials and service URL with local values. +-- Goose migrations (000002, 000006, 000009) insert these suppliers with +-- environment-specific credentials/URLs/paths that don't exist or don't match +-- locally. WireMock expects: oauth at /oauth/token, orders at /order. 
+UPDATE supplier +SET + client_secret_name = 'test_supplier_client_secret', + service_url = 'http://wiremock:8080', + oauth_token_path = '/oauth/token', + order_path = '/order' +WHERE supplier_id IN ( + '11111111-1111-4111-8111-111111111111', + '77777777-7777-4777-8777-777777777777' +); +-- PCR test type (goose migration 000005 only seeds 31676001) INSERT INTO test_type (test_code, description) -VALUES -('31676001', 'HIV antigen test'), -('PCR', 'Polymerase Chain Reaction') +VALUES ('PCR', 'Polymerase Chain Reaction') ON CONFLICT (test_code) DO NOTHING; +-- PCR offerings (goose migration 000012 only seeds test code 31676001) +INSERT INTO la_supplier_offering (offering_id, supplier_id, test_code, la_code, effective_from) +VALUES +('20000002-0000-4000-8000-000000000001', '11111111-1111-4111-8111-111111111111', 'PCR', '1440', DATE '2026-02-09'), +('20000002-0000-4000-8000-000000000002', '77777777-7777-4777-8777-777777777777', 'PCR', '4230', DATE '2026-02-09') +ON CONFLICT (la_code, supplier_id, test_code) DO NOTHING; + INSERT INTO patient_mapping (patient_uid, nhs_number, birth_date) VALUES ( 'e3c4d5e6-3456-4bcd-8efa-3456789abcde', @@ -82,18 +51,10 @@ VALUES ( ) ON CONFLICT (nhs_number) DO NOTHING; -INSERT INTO la_supplier_offering (offering_id, supplier_id, test_code, la_code, effective_from) -VALUES -('a5e6f7a8-5678-4def-8abc-56789abcdefa', 'c1a2b3c4-1234-4def-8abc-123456789abc', '31676001', '1440', DATE '2026-02-09'), -('b6f7a8b9-6789-4efa-8bcd-6789abcdefab', 'c1a2b3c4-1234-4def-8abc-123456789abc', 'PCR', '1440', DATE '2026-02-09'), -('c7a8b9c0-7890-4fab-8cde-789abcdefabc', 'd2b3c4d5-2345-4abc-8def-23456789abcd', '31676001', '4230', DATE '2026-02-09'), -('d8b9c0d1-8901-4abc-8def-89abcdefabcd', 'd2b3c4d5-2345-4abc-8def-23456789abcd', 'PCR', '4230', DATE '2026-02-09') -ON CONFLICT (la_code, supplier_id, test_code) DO NOTHING; - INSERT INTO test_order (order_uid, supplier_id, patient_uid, test_code, originator) VALUES ( 'e9c0d1e2-9012-4bcd-8efa-90abcdefabcd', - 
'c1a2b3c4-1234-4def-8abc-123456789abc', + '11111111-1111-4111-8111-111111111111', 'e3c4d5e6-3456-4bcd-8efa-3456789abcde', '31676001', 'seed-migration' @@ -103,7 +64,7 @@ ON CONFLICT (order_uid) DO NOTHING; INSERT INTO test_order (order_uid, supplier_id, patient_uid, test_code, originator) VALUES ( 'fab1c2d3-0123-4cde-8fab-01abcdefabcd', - 'd2b3c4d5-2345-4abc-8def-23456789abcd', + '77777777-7777-4777-8777-777777777777', 'f4d5e6f7-4567-4cde-8fab-456789abcdef', 'PCR', 'seed-migration' diff --git a/database/migrations/000002_static_data.sql b/database/migrations/000002_static_data.sql deleted file mode 100644 index 0c5ee72f0..000000000 --- a/database/migrations/000002_static_data.sql +++ /dev/null @@ -1,31 +0,0 @@ --- +goose Up - -/* - ================================================================= - STATIC DATA - - This is for data that will not change between local and PoC env - ================================================================= - */ - -INSERT INTO status_type (status_code, description) -VALUES -('GENERATED', 'Order has been generated by the order service'), -('QUEUED', 'Order has been queued for processing'), -('SUBMITTED', 'Order has been submitted to the supplier'), -('CONFIRMED', 'Order has been confirmed by the supplier'), -('DISPATCHED', 'Test has been dispatched to the patient'), -('RECEIVED', 'Test has been received by the laboratory'), -('COMPLETE', 'Test results are ready from the supplier') -ON CONFLICT DO NOTHING; - -INSERT INTO result_type (result_code, description) -VALUES -('RESULT_AVAILABLE', 'Test results are available from the supplier'), -('RESULT_WITHHELD', 'Test result are being withheld by the supplier for any reason') -ON CONFLICT DO NOTHING; - - --- +goose Down -DELETE FROM status_type; -DELETE FROM result_type; diff --git a/database/migrations/000004_add_results_to_supplier_table.sql b/database/migrations/000004_add_results_to_supplier_table.sql deleted file mode 100644 index e48577c1d..000000000 --- 
a/database/migrations/000004_add_results_to_supplier_table.sql +++ /dev/null @@ -1,17 +0,0 @@ --- +goose Up -ALTER TABLE supplier -ADD COLUMN results_path varchar(255); - -UPDATE supplier -SET - results_path = '/api/results' -WHERE supplier_id = 'c1a2b3c4-1234-4def-8abc-123456789abc'; - -UPDATE supplier -SET - results_path = '/nhs_home_test/results' -WHERE supplier_id = 'd2b3c4d5-2345-4abc-8def-23456789abcd'; - --- +goose Down -ALTER TABLE supplier -DROP COLUMN IF EXISTS results_path; diff --git a/database/migrations/000009_add_order_status_reminder_table.sql b/database/migrations/000009_add_order_status_reminder_table.sql deleted file mode 100644 index 8be83782a..000000000 --- a/database/migrations/000009_add_order_status_reminder_table.sql +++ /dev/null @@ -1,30 +0,0 @@ --- +goose Up -CREATE TYPE reminder_status AS ENUM ( - 'SCHEDULED', - 'QUEUED', - 'FAILED', - 'CANCELLED' -); - -CREATE TABLE IF NOT EXISTS order_status_reminder -( - reminder_id uuid PRIMARY KEY DEFAULT gen_random_uuid(), - order_uid uuid NOT NULL REFERENCES test_order (order_uid) ON DELETE CASCADE, - trigger_status varchar(50) NOT NULL REFERENCES status_type (status_code), - reminder_number smallint NOT NULL CHECK (reminder_number >= 1), - status reminder_status NOT NULL, - triggered_at timestamp with time zone NOT NULL, - sent_at timestamp with time zone, - created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, - CONSTRAINT uq_order_status_reminder - UNIQUE (order_uid, trigger_status, reminder_number) -); - -CREATE INDEX IF NOT EXISTS idx_order_status_reminder_status_triggered_at -ON order_status_reminder (status, triggered_at); - - --- +goose Down -DROP INDEX IF EXISTS idx_order_status_reminder_status_triggered_at; -DROP TABLE IF EXISTS order_status_reminder; -DROP TYPE IF EXISTS reminder_status; diff --git a/lambdas/README.md b/lambdas/README.md index abe04264a..b745cef6e 100644 --- a/lambdas/README.md +++ b/lambdas/README.md @@ -15,6 +15,16 @@ lambdas/ │ │ ├── index.ts │ │ 
└── [other files] │ └── lib # shared code + ├── goose-migrator-lambda/ + │ ├── src/ + │ │ ├── main.go + │ │ ├── go.mod + │ │ └── go.sum + │ ├── migrations/ + │ │ └── *.sql + │ └── scripts/ + │ ├── build.sh + │ └── test-migrations.sh └── package.json ``` @@ -22,7 +32,7 @@ lambdas/ ### Directory Naming -- All Lambdas must be direct subdirectories of `src/` +- All TypeScript Lambdas must be direct subdirectories of `src/` - Lambda directory names must end with `-lambda` suffix - Each Lambda directory contains its handler and related code @@ -114,3 +124,28 @@ pnpm test ``` **Note:** Integration tests are slower (~10-30s startup) but provide confidence that infrastructure components work correctly with real external systems. + +## Goose Migrator Lambda (Go) + +The `goose-migrator-lambda/` contains a Go-based Lambda that runs database migrations using [Goose](https://github.com/pressly/goose). Unlike the TypeScript lambdas above, it has its own build process and directory structure. + +Note that `lambdas/goose-migrator-lambda/migrations/` is the source of truth for all goose migration files in this repository. 
+ +### Build + +```bash +# Build the Lambda zip (uses content hashing to skip unnecessary rebuilds) +./lambdas/goose-migrator-lambda/scripts/build.sh +``` + +Output: `lambdas/goose-migrator-lambda/goose-migrator-lambda.zip` + +### Test Migrations + +```bash +# Run migrations against a local PostgreSQL container (requires Docker) +mise run test-migrations + +# Same, but keep the PostgreSQL container running +mise run test-migrations-keep +``` diff --git a/database/migrations/000001_create_initial_home_test_tables.sql b/lambdas/goose-migrator-lambda/migrations/000001_create_initial_home_test_tables.sql similarity index 100% rename from database/migrations/000001_create_initial_home_test_tables.sql rename to lambdas/goose-migrator-lambda/migrations/000001_create_initial_home_test_tables.sql diff --git a/lambdas/goose-migrator-lambda/migrations/000002_seed_home_test_data.sql b/lambdas/goose-migrator-lambda/migrations/000002_seed_home_test_data.sql new file mode 100644 index 000000000..c1b35542f --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000002_seed_home_test_data.sql @@ -0,0 +1,54 @@ +-- +goose Up +INSERT INTO supplier ( + supplier_id, + supplier_name, + service_url, + website_url, + client_secret_name, + client_id, + oauth_token_path, + order_path, + oauth_scope +) +VALUES ( + '11111111-1111-4111-8111-111111111111', + 'Preventx', + 'https://func-nhshometest-dev.azurewebsites.net/', + 'https://www.preventx.com/', + 'nhs-hometest/dev/preventex-dev-client-secret', + '7e9b8f16-4686-46f4-903e-2d364774fc82', + '/api/oauth', + '/api/order', + 'orders results' +) +ON CONFLICT (supplier_id) DO NOTHING; + +INSERT INTO supplier ( + supplier_id, + supplier_name, + service_url, + website_url, + client_secret_name, + client_id, + oauth_token_path, + order_path, + oauth_scope +) +VALUES ( + '77777777-7777-4777-8777-777777777777', + 'SH:24', + 'https://admin.qa3.sh24.org.uk/', + 'https://sh24.org.uk/', + 'nhs-hometest/dev/sh24-dev-client-secret', + 
'zrgmf33Zdk-515BIMrds29v9Z3KzoH-tfYDgxLsYtZE', + '/oauth/token', + '/order', + 'order results' +) +ON CONFLICT (supplier_id) DO NOTHING; + +-- +goose Down +DELETE FROM supplier +WHERE supplier_id = '11111111-1111-4111-8111-111111111111'; +DELETE FROM supplier +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; diff --git a/database/migrations/000003_add_order_status_columns.sql b/lambdas/goose-migrator-lambda/migrations/000003_add_order_status_columns.sql similarity index 100% rename from database/migrations/000003_add_order_status_columns.sql rename to lambdas/goose-migrator-lambda/migrations/000003_add_order_status_columns.sql diff --git a/lambdas/goose-migrator-lambda/migrations/000004_static_data.sql b/lambdas/goose-migrator-lambda/migrations/000004_static_data.sql new file mode 100644 index 000000000..112a2fbc7 --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000004_static_data.sql @@ -0,0 +1,47 @@ +-- +goose Up + +/* + ================================================================= + STATIC DATA + + This is for data that will not change between local and PoC env + ================================================================= + */ + +INSERT INTO status_type (status_code, description) +VALUES +('GENERATED', 'Order has been generated by the order service'), +('QUEUED', 'Order has been queued for processing'), +('PLACED', 'Order has been placed with the supplier'), +('ORDER_RECEIVED', 'Order has been confirmed by the supplier'), +('DISPATCHED', 'Test has been dispatched to the patient'), +('RECEIVED', 'Test has been received by the laboratory'), +('COMPLETE', 'Test results are ready from the supplier') +ON CONFLICT DO NOTHING; + +INSERT INTO result_type (result_code, description) +VALUES +('RESULT_AVAILABLE', 'Test results are available from the supplier'), +('RESULT_WITHHELD', 'Test result are being withheld by the supplier for any reason') +ON CONFLICT DO NOTHING; + + +-- +goose Down +DELETE FROM status_type +WHERE status_code = 
'GENERATED'; +DELETE FROM status_type +WHERE status_code = 'QUEUED'; +DELETE FROM status_type +WHERE status_code = 'PLACED'; +DELETE FROM status_type +WHERE status_code = 'ORDER_RECEIVED'; +DELETE FROM status_type +WHERE status_code = 'DISPATCHED'; +DELETE FROM status_type +WHERE status_code = 'RECEIVED'; +DELETE FROM status_type +WHERE status_code = 'COMPLETE'; +DELETE FROM result_type +WHERE result_code = 'RESULT_AVAILABLE'; +DELETE FROM result_type +WHERE result_code = 'RESULT_WITHHELD'; diff --git a/lambdas/goose-migrator-lambda/migrations/000005_seed_test_type.sql b/lambdas/goose-migrator-lambda/migrations/000005_seed_test_type.sql new file mode 100644 index 000000000..fbfd4fb25 --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000005_seed_test_type.sql @@ -0,0 +1,11 @@ +-- +goose Up + +INSERT INTO test_type (test_code, description) +VALUES +('31676001', 'HIV antigen test') +ON CONFLICT (test_code) DO NOTHING; + + +-- +goose Down +DELETE FROM test_type +WHERE test_code = '31676001'; diff --git a/lambdas/goose-migrator-lambda/migrations/000006_supplier_data_update.sql b/lambdas/goose-migrator-lambda/migrations/000006_supplier_data_update.sql new file mode 100644 index 000000000..ffcee25c7 --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000006_supplier_data_update.sql @@ -0,0 +1,17 @@ +-- +goose Up +UPDATE supplier +SET service_url = 'https://func-nhshometest-staging.azurewebsites.net/' +WHERE supplier_id = '11111111-1111-4111-8111-111111111111'; + +UPDATE supplier +SET service_url = 'https://admin.qa1.sh24.org.uk/' +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; + +-- +goose Down +UPDATE supplier +SET service_url = 'https://func-nhshometest-dev.azurewebsites.net/' +WHERE supplier_id = '11111111-1111-4111-8111-111111111111'; + +UPDATE supplier +SET service_url = 'https://admin.qa3.sh24.org.uk/' +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; diff --git 
a/lambdas/goose-migrator-lambda/migrations/000007_supplier_data_update.sql b/lambdas/goose-migrator-lambda/migrations/000007_supplier_data_update.sql new file mode 100644 index 000000000..973410b0b --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000007_supplier_data_update.sql @@ -0,0 +1,9 @@ +-- +goose Up +UPDATE supplier +SET client_id = '4g3lEP_BGzTd9MZTbtejrrlbgb7vJZzfxkKq6R3zTWY' +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; + +-- +goose Down +UPDATE supplier +SET client_id = 'zrgmf33Zdk-515BIMrds29v9Z3KzoH-tfYDgxLsYtZE' +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; diff --git a/lambdas/goose-migrator-lambda/migrations/000008_schema_per_environment.sql b/lambdas/goose-migrator-lambda/migrations/000008_schema_per_environment.sql new file mode 100644 index 000000000..e8aea6024 --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000008_schema_per_environment.sql @@ -0,0 +1,24 @@ +-- +goose Up + +-- ================================================================= +-- Schema-per-environment setup +-- +-- This migration creates: +-- 1. A dedicated schema for the current environment +-- 2. An app_user role with access ONLY to that schema +-- +-- The schema name is set via search_path at connection time. +-- The goose migrator connects with search_path=, +-- so all DDL runs inside that schema automatically. +-- +-- NOTE: This migration runs as the master (postgres) user which +-- has the privileges to create schemas and roles. +-- ================================================================= + +-- Create extension if not exists (database-level, idempotent) +CREATE EXTENSION IF NOT EXISTS pgcrypto; + + +-- +goose Down +-- Down migration intentionally left minimal to avoid data loss. +-- To fully remove, manually drop the schema and role. 
diff --git a/lambdas/goose-migrator-lambda/migrations/000009_update_supplier_endpoints.sql b/lambdas/goose-migrator-lambda/migrations/000009_update_supplier_endpoints.sql new file mode 100644 index 000000000..09c46da3d --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000009_update_supplier_endpoints.sql @@ -0,0 +1,31 @@ +-- +goose Up +ALTER TABLE supplier +ADD COLUMN IF NOT EXISTS results_path varchar(255); + +UPDATE supplier +SET + service_url = 'https://hometest-staging.prevx.io/', + oauth_token_path = '/api/oauth/token', + results_path = '/api/results' +WHERE supplier_id = '11111111-1111-4111-8111-111111111111'; + +UPDATE supplier +SET + results_path = '/nhs_home_test/results', + order_path = '/nhs_home_test/order' +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; + +-- +goose Down +UPDATE supplier +SET + service_url = 'https://func-nhshometest-staging.azurewebsites.net/', + oauth_token_path = '/api/oauth' +WHERE supplier_id = '11111111-1111-4111-8111-111111111111'; + +UPDATE supplier +SET + order_path = '/order' +WHERE supplier_id = '77777777-7777-4777-8777-777777777777'; + +ALTER TABLE supplier +DROP COLUMN IF EXISTS results_path; diff --git a/lambdas/goose-migrator-lambda/migrations/000010_update_order_status_codes.sql b/lambdas/goose-migrator-lambda/migrations/000010_update_order_status_codes.sql new file mode 100644 index 000000000..176614582 --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000010_update_order_status_codes.sql @@ -0,0 +1,68 @@ +-- +goose Up + +-- +goose StatementBegin +DO $$ +DECLARE _con text; +BEGIN + SELECT conname INTO _con FROM pg_constraint + WHERE conrelid = 'order_status'::regclass AND contype = 'f' + AND confrelid = 'status_type'::regclass; + IF _con IS NOT NULL THEN + EXECUTE format('ALTER TABLE order_status DROP CONSTRAINT %I', _con); + END IF; +END; +$$; +-- +goose StatementEnd +-- Temporarily add ON UPDATE CASCADE to enable status code renames, then restore original constraint +ALTER TABLE 
order_status +ADD CONSTRAINT order_status_status_code_fkey +FOREIGN KEY (status_code) REFERENCES status_type (status_code) ON UPDATE CASCADE; + +UPDATE status_type +SET + status_code = 'SUBMITTED', + description = 'Order has been submitted to the supplier' +WHERE status_code = 'PLACED'; + +UPDATE status_type +SET + status_code = 'CONFIRMED', + description = 'Order has been confirmed by the supplier' +WHERE status_code = 'ORDER_RECEIVED'; + +-- Remove CASCADE after updates complete, restoring original non-cascading behavior +ALTER TABLE order_status +DROP CONSTRAINT order_status_status_code_fkey; + +ALTER TABLE order_status +ADD CONSTRAINT order_status_status_code_fkey +FOREIGN KEY (status_code) REFERENCES status_type (status_code); + +-- +goose Down +ALTER TABLE order_status +DROP CONSTRAINT order_status_status_code_fkey; + +-- Temporarily add ON UPDATE CASCADE to allow renames to revert, then restore original constraint +ALTER TABLE order_status +ADD CONSTRAINT order_status_status_code_fkey +FOREIGN KEY (status_code) REFERENCES status_type (status_code) ON UPDATE CASCADE; + +UPDATE status_type +SET + status_code = 'PLACED', + description = 'Order has been placed with the supplier' +WHERE status_code = 'SUBMITTED'; + +UPDATE status_type +SET + status_code = 'ORDER_RECEIVED', + description = 'Order has been confirmed by the supplier' +WHERE status_code = 'CONFIRMED'; + +-- Restore original constraint without ON UPDATE CASCADE +ALTER TABLE order_status +DROP CONSTRAINT order_status_status_code_fkey; + +ALTER TABLE order_status +ADD CONSTRAINT order_status_status_code_fkey +FOREIGN KEY (status_code) REFERENCES status_type (status_code); diff --git a/database/migrations/000005_add_consent_table.sql b/lambdas/goose-migrator-lambda/migrations/000011_add_consent_table.sql similarity index 65% rename from database/migrations/000005_add_consent_table.sql rename to lambdas/goose-migrator-lambda/migrations/000011_add_consent_table.sql index 175739658..435d4eff0 100644 --- 
a/database/migrations/000005_add_consent_table.sql +++ b/lambdas/goose-migrator-lambda/migrations/000011_add_consent_table.sql @@ -12,5 +12,8 @@ CREATE UNIQUE INDEX idx_consent_order_uid ON consent (order_uid); -- +goose Down -DROP INDEX idx_consent_order_uid; -DROP TABLE consent; +-- NOTE: In production, this rollback should be avoided as the consent table +-- holds legally required audit data. However, for testing purposes, we provide +-- the cleanup statements below. +DROP INDEX IF EXISTS idx_consent_order_uid; +DROP TABLE IF EXISTS consent; diff --git a/lambdas/goose-migrator-lambda/migrations/000012_add_la_supplier_offering.sql b/lambdas/goose-migrator-lambda/migrations/000012_add_la_supplier_offering.sql new file mode 100644 index 000000000..82d216b7e --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000012_add_la_supplier_offering.sql @@ -0,0 +1,13 @@ +-- +goose Up +INSERT INTO la_supplier_offering (offering_id, supplier_id, test_code, la_code, effective_from) +VALUES +('a5e6f7a8-5678-4def-8abc-56789abcdefa', '11111111-1111-4111-8111-111111111111', '31676001', '1440', DATE '2026-03-06'), +('c7a8b9c0-7890-4fab-8cde-789abcdefabc', '77777777-7777-4777-8777-777777777777', '31676001', '4230', DATE '2026-03-06') +ON CONFLICT (la_code, supplier_id, test_code) DO NOTHING; + +-- +goose Down +DELETE FROM la_supplier_offering +WHERE offering_id IN ( + 'a5e6f7a8-5678-4def-8abc-56789abcdefa', + 'c7a8b9c0-7890-4fab-8cde-789abcdefabc' +); diff --git a/database/migrations/000006_remove_order_reference_from_order_status.sql b/lambdas/goose-migrator-lambda/migrations/000013_remove_order_reference_from_order_status.sql similarity index 100% rename from database/migrations/000006_remove_order_reference_from_order_status.sql rename to lambdas/goose-migrator-lambda/migrations/000013_remove_order_reference_from_order_status.sql diff --git a/database/migrations/000007_add_notification_audit_table.sql 
b/lambdas/goose-migrator-lambda/migrations/000014_add_notification_audit_table.sql similarity index 100% rename from database/migrations/000007_add_notification_audit_table.sql rename to lambdas/goose-migrator-lambda/migrations/000014_add_notification_audit_table.sql diff --git a/database/migrations/000008_create_session_table.sql b/lambdas/goose-migrator-lambda/migrations/000015_create_session_table.sql similarity index 60% rename from database/migrations/000008_create_session_table.sql rename to lambdas/goose-migrator-lambda/migrations/000015_create_session_table.sql index 4bfa70dc7..50f6538a3 100644 --- a/database/migrations/000008_create_session_table.sql +++ b/lambdas/goose-migrator-lambda/migrations/000015_create_session_table.sql @@ -1,5 +1,5 @@ -- +goose Up -CREATE TABLE IF NOT EXISTS session +CREATE TABLE session ( session_id uuid PRIMARY KEY DEFAULT gen_random_uuid(), refresh_token_id uuid NOT NULL, @@ -16,14 +16,19 @@ CREATE TABLE IF NOT EXISTS session birth_date date NOT NULL, nhs_number varchar(10) NOT NULL, gp_ods_code varchar(20) NOT NULL, - session_created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, - last_refresh_at timestamp with time zone NOT NULL DEFAULT current_timestamp, - max_expires_at timestamp with time zone NOT NULL, - CONSTRAINT uq_session_refresh_token_id UNIQUE (refresh_token_id), - CONSTRAINT chk_session_nhs_number_format CHECK (nhs_number ~ '^[0-9]{10}$') + session_created_at timestamp + with time zone NOT NULL DEFAULT current_timestamp, + last_refresh_at timestamp + with time zone NOT NULL DEFAULT current_timestamp, + max_expires_at timestamp + with time zone NOT NULL, + CONSTRAINT uq_session_refresh_token_id UNIQUE + (refresh_token_id), + CONSTRAINT chk_session_nhs_number_format CHECK + (nhs_number ~ '^[0-9]{10}$') ); -CREATE INDEX IF NOT EXISTS idx_session_max_expires_at +CREATE INDEX idx_session_max_expires_at ON session (max_expires_at); diff --git 
a/lambdas/goose-migrator-lambda/migrations/000016_add_order_status_reminder_table.sql b/lambdas/goose-migrator-lambda/migrations/000016_add_order_status_reminder_table.sql new file mode 100644 index 000000000..9ece87ad3 --- /dev/null +++ b/lambdas/goose-migrator-lambda/migrations/000016_add_order_status_reminder_table.sql @@ -0,0 +1,43 @@ +-- +goose Up +CREATE TYPE reminder_status AS ENUM +( + 'SCHEDULED', + 'QUEUED', + 'FAILED', + 'CANCELLED' +); + +CREATE TABLE +IF NOT EXISTS order_status_reminder +( + reminder_id uuid PRIMARY KEY DEFAULT gen_random_uuid(), + order_uid uuid NOT NULL REFERENCES test_order + (order_uid) ON + DELETE CASCADE, + trigger_status varchar(50) + NOT NULL REFERENCES status_type + (status_code), + reminder_number smallint NOT NULL CHECK + (reminder_number >= 1), + status reminder_status NOT NULL, + triggered_at timestamp + with time zone NOT NULL, + sent_at timestamp + with time zone, + created_at timestamp + with time zone NOT NULL DEFAULT current_timestamp, + CONSTRAINT uq_order_status_reminder + UNIQUE + (order_uid, trigger_status, reminder_number) +); + +CREATE INDEX +IF NOT EXISTS idx_order_status_reminder_status_triggered_at +ON order_status_reminder +(status, triggered_at); + + +-- +goose Down +DROP INDEX IF EXISTS idx_order_status_reminder_status_triggered_at; +DROP TABLE IF EXISTS order_status_reminder; +DROP TYPE IF EXISTS reminder_status; diff --git a/database/migrations/000010_add_latest_order_status_view.sql b/lambdas/goose-migrator-lambda/migrations/000017_add_latest_order_status_view.sql similarity index 100% rename from database/migrations/000010_add_latest_order_status_view.sql rename to lambdas/goose-migrator-lambda/migrations/000017_add_latest_order_status_view.sql diff --git a/lambdas/goose-migrator-lambda/scripts/build.sh b/lambdas/goose-migrator-lambda/scripts/build.sh new file mode 100755 index 000000000..bdeffa037 --- /dev/null +++ b/lambdas/goose-migrator-lambda/scripts/build.sh @@ -0,0 +1,242 @@ +#!/usr/bin/env bash 
+# ----------------------------------------------------------------------------- +# Build Goose Migrator Script +# Only rebuilds the goose-migrator Lambda when source code changes are detected. +# Uses content hashing to determine if a rebuild is necessary. +# +# Usage: +# ./lambdas/goose-migrator-lambda/scripts/build.sh +# +# Optional environment variables: +# MIGRATOR_CACHE_DIR Build cache directory (default: .migrator-build-cache) +# FORCE_MIGRATOR_REBUILD=true Force rebuild even if no changes detected +# ----------------------------------------------------------------------------- + +set -euo pipefail + +# ----------------------------------------------------------------------------- +# Configuration +# ----------------------------------------------------------------------------- +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +MIGRATOR_DIR="$(cd "${SCRIPT_DIR}/.." && pwd)" +CACHE_DIR_INPUT="${MIGRATOR_CACHE_DIR:-.migrator-build-cache}" +FORCE_REBUILD="${FORCE_MIGRATOR_REBUILD:-false}" + +# Create and resolve cache directory +mkdir -p "$CACHE_DIR_INPUT" +CACHE_DIR=$(cd "$CACHE_DIR_INPUT" && pwd) + +# Cache file locations +HASH_FILE="$CACHE_DIR/goose-migrator.hash" +BUILD_LOG="$CACHE_DIR/last-build.log" + +# Output zip path +ZIP_FILE="$MIGRATOR_DIR/goose-migrator-lambda.zip" + +# ----------------------------------------------------------------------------- +# Functions +# ----------------------------------------------------------------------------- + +calculate_source_hash() { + # Calculate hash of all source files that affect the build: + # - Go source files (*.go) + # - Go module files (go.mod, go.sum) + # - SQL migration files (*.sql) + + local hash_cmd="sha256sum" + if ! 
command -v sha256sum &> /dev/null; then + hash_cmd="md5sum" + fi + + local all_hashes="" + + # Hash Go source files + if [[ -d "$MIGRATOR_DIR/src" ]]; then + local go_hash + go_hash=$(find "$MIGRATOR_DIR/src" -maxdepth 1 -type f \( \ + -name "*.go" \ + \) 2>/dev/null | sort | xargs cat 2>/dev/null | $hash_cmd | cut -d' ' -f1) + all_hashes+="go:${go_hash}|" + fi + + # Hash Go module files + for file in "$MIGRATOR_DIR/src/go.mod" "$MIGRATOR_DIR/src/go.sum"; do + if [[ -f "$file" ]]; then + local file_hash + file_hash=$($hash_cmd "$file" | cut -d' ' -f1) + all_hashes+="$(basename "$file"):${file_hash}|" + fi + done + + # Hash SQL migration files + if [[ -d "$MIGRATOR_DIR/migrations" ]]; then + local sql_hash + sql_hash=$(find "$MIGRATOR_DIR/migrations" -type f -name "*.sql" 2>/dev/null | sort | xargs cat 2>/dev/null | $hash_cmd | cut -d' ' -f1) + all_hashes+="migrations:${sql_hash}|" + fi + + # Combine all hashes into final hash + local final_hash + final_hash=$(echo "$all_hashes" | $hash_cmd | cut -d' ' -f1) + + echo "$final_hash" + return 0 +} + +show_hash_inputs() { + echo "Files included in hash calculation:" + echo " Go source files:" + find "$MIGRATOR_DIR/src" -maxdepth 1 -type f -name "*.go" 2>/dev/null | wc -l | xargs printf " %s files in src/\n" + + echo " Module files:" + for file in go.mod go.sum; do + if [[ -f "$MIGRATOR_DIR/src/$file" ]]; then + echo " $file" + fi + done + + echo " Migration files:" + find "$MIGRATOR_DIR/migrations" -type f -name "*.sql" 2>/dev/null | wc -l | xargs printf " %s files in migrations/\n" + return 0 +} + +get_cached_hash() { + if [[ -f "$HASH_FILE" ]]; then + cat "$HASH_FILE" + else + echo "" + fi + return 0 +} + +save_hash() { + local hash="$1" + echo "$hash" > "$HASH_FILE" + return 0 +} + +needs_rebuild() { + if [[ "$FORCE_REBUILD" == "true" ]]; then + echo "Force rebuild requested via FORCE_MIGRATOR_REBUILD=true" + return 0 + fi + + local current_hash + current_hash=$(calculate_source_hash) + local cached_hash + 
cached_hash=$(get_cached_hash) + + if [[ -z "$cached_hash" ]]; then + echo "No cached hash found - initial build required" + return 0 + fi + + if [[ "$current_hash" != "$cached_hash" ]]; then + echo "Source changes detected (hash changed)" + echo " Previous: ${cached_hash:0:16}..." + echo " Current: ${current_hash:0:16}..." + return 0 + fi + + # Check if zip exists + if [[ ! -f "$ZIP_FILE" ]]; then + echo "Zip file missing - rebuild required" + return 0 + fi + + return 1 +} + +build_migrator() { + echo "Compiling Go binary (linux/arm64)..." + cd "$MIGRATOR_DIR/src" + + # go mod download: fetches dependencies pinned in go.sum without modifying go.mod/go.sum + # go mod verify: ensures downloaded modules match go.sum checksums + # (NOT go mod tidy: tidy rewrites go.mod/go.sum based on the builder's environment, + # creating non-reproducible builds. Run 'go mod tidy' manually as a developer step + # when updating dependencies, not as part of the automated build.) + go mod download + go mod verify + + GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -mod=readonly -trimpath -ldflags='-s -w' -o bootstrap main.go + return 0 +} + +package_migrator() { + echo "Packaging goose-migrator-lambda..." 
+ cd "$MIGRATOR_DIR" + + # Remove existing zip if present + rm -f "$ZIP_FILE" + + # Create zip: bootstrap binary at root + migrations/ directory + zip -j "$ZIP_FILE" src/bootstrap + zip -r "$ZIP_FILE" migrations/ + + echo " Created: $ZIP_FILE ($(du -h "$ZIP_FILE" | cut -f1))" + return 0 +} + +# ----------------------------------------------------------------------------- +# Main +# ----------------------------------------------------------------------------- + +echo "==========================================" +echo "Goose Migrator Build Script" +echo "==========================================" +echo "Migrator directory: $MIGRATOR_DIR" +echo "Cache directory: $CACHE_DIR" +echo "" + +# Show what's being tracked for changes +show_hash_inputs +echo "" + +# Check if rebuild is needed +if needs_rebuild; then + echo "" + echo "Starting build process..." + echo "" + + # Capture start time + start_time=$(date +%s) + + # Run build steps + build_migrator + package_migrator + + # Calculate and save new hash + new_hash=$(calculate_source_hash) + save_hash "$new_hash" + + # Calculate duration + end_time=$(date +%s) + duration=$((end_time - start_time)) + + echo "" + echo "==========================================" + echo "Goose migrator build complete! (${duration}s)" + echo "Hash: ${new_hash:0:16}..." 
+ echo "==========================================" + + # Log build info + { + echo "Build completed: $(date -Iseconds)" + echo "Duration: ${duration}s" + echo "Hash: $new_hash" + } > "$BUILD_LOG" +else + echo "" + echo "==========================================" + echo "No changes detected - skipping build" + echo "==========================================" + + if [[ -f "$BUILD_LOG" ]]; then + echo "" + echo "Last build info:" + cat "$BUILD_LOG" + fi +fi + +echo "" diff --git a/lambdas/goose-migrator-lambda/scripts/test-migrations.sh b/lambdas/goose-migrator-lambda/scripts/test-migrations.sh new file mode 100755 index 000000000..e3e166dbe --- /dev/null +++ b/lambdas/goose-migrator-lambda/scripts/test-migrations.sh @@ -0,0 +1,388 @@ +#!/usr/bin/env bash + +# Test Goose database migrations against a local PostgreSQL container, +# replicating the exact behaviour of the goose-migrator-lambda Lambda: +# +# 1. Connect as master user +# 2. Create a named schema (DB_SCHEMA) if it doesn't exist +# 3. Create app_user_ role, grant schema-scoped privileges +# 4. Set search_path to the schema, run goose migrations +# 5. Verify app_user can connect and perform DML — but cannot see other schemas +# 6. 
Test rollback and idempotent re-apply + +# Usage: +# ./lambdas/goose-migrator-lambda/scripts/test-migrations.sh + +# Prerequisites: +# - Docker installed and running +# - mise installed (will install goose automatically) + +# Environment variables (optional): +# POSTGRES_IMAGE - PostgreSQL Docker image (default: postgres:17.9) +# POSTGRES_USER - Master DB user (default: testuser) +# POSTGRES_PASSWORD - Master DB password (default: testpassword) +# POSTGRES_DB - Database name (default: testdb) +# POSTGRES_PORT - Host port to map (default: 15432) +# POSTGRES_SCHEMA - Target schema, mirroring DB_SCHEMA in the Lambda (default: hometest) +# APP_USER_PASSWORD - Password for app_user_${POSTGRES_SCHEMA} (default: appuserpassword) +# KEEP_CONTAINER - Set to "true" to keep container after tests (default: false) + +set -euo pipefail + +# ============================================================================== +# Configuration +# ============================================================================== + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LAMBDA_DIR="$(cd "${SCRIPT_DIR}/.."
&& pwd)" +MIGRATIONS_DIR="${LAMBDA_DIR}/migrations" + +POSTGRES_IMAGE="${POSTGRES_IMAGE:-postgres:17.9}" +POSTGRES_USER="${POSTGRES_USER:-testuser}" +POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-testpassword}" +POSTGRES_DB="${POSTGRES_DB:-testdb}" +POSTGRES_PORT="${POSTGRES_PORT:-15432}" +POSTGRES_SCHEMA="${POSTGRES_SCHEMA:-hometest}" +APP_USER_PASSWORD="${APP_USER_PASSWORD:-appuserpassword}" +KEEP_CONTAINER="${KEEP_CONTAINER:-false}" + +# Derived — mirrors the naming convention in main.go: app_user_ +APP_USERNAME="app_user_${POSTGRES_SCHEMA}" + +CONTAINER_NAME="goose-migrations-test-$$" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# ============================================================================== +# Functions +# ============================================================================== + +log_info() { + echo -e "${GREEN}[INFO]${NC} $*" + return 0 +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $*" + return 0 +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $*" + return 0 +} + +cleanup() { + if [[ "${KEEP_CONTAINER}" != "true" ]]; then + log_info "Cleaning up container ${CONTAINER_NAME}..." + docker rm -f "${CONTAINER_NAME}" 2>/dev/null || true + else + log_warn "Container ${CONTAINER_NAME} kept running (KEEP_CONTAINER=true)" + fi + return 0 +} + +wait_for_postgres() { + local max_attempts=30 + local attempt=1 + + log_info "Waiting for PostgreSQL to be ready (in-container check)..." + until docker exec "${CONTAINER_NAME}" pg_isready -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" >/dev/null 2>&1; do + if [[ ${attempt} -ge ${max_attempts} ]]; then + log_error "PostgreSQL failed to become ready after ${max_attempts} attempts" + return 1 + fi + echo -n "." + sleep 1 + ((attempt++)) + done + echo "" + + # pg_isready can return success briefly during PostgreSQL's init cycle + # (start - initdb - shutdown - restart) before the server is fully ready. 
+ # A short sleep avoids "database system is shutting down" on the next connection. + sleep 2 + + # Also verify the host-side port mapping is reachable (important on macOS Docker Desktop) + attempt=1 + log_info "Verifying host-side connectivity on port ${POSTGRES_PORT}..." + until PGPASSWORD="${POSTGRES_PASSWORD}" psql -h localhost -p "${POSTGRES_PORT}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -c "SELECT 1" >/dev/null 2>&1 \ + || nc -z localhost "${POSTGRES_PORT}" 2>/dev/null; do + if [[ ${attempt} -ge ${max_attempts} ]]; then + log_error "Host-side connection to localhost:${POSTGRES_PORT} failed after ${max_attempts} attempts" + return 1 + fi + echo -n "." + sleep 1 + ((attempt++)) + done + echo "" + log_info "PostgreSQL is ready!" + return 0 +} + +ensure_goose() { + if command -v goose &>/dev/null; then + log_info "Using goose: $(goose --version 2>&1 | head -1)" + return 0 + fi + + if command -v mise &>/dev/null; then + log_info "Installing goose via mise..." + mise install "go:github.com/pressly/goose/v3/cmd/goose" + eval "$(mise env)" + if command -v goose &>/dev/null; then + log_info "Goose installed: $(goose --version 2>&1 | head -1)" + return 0 + fi + fi + + log_error "goose not found. Please install it via 'mise install' or 'go install github.com/pressly/goose/v3/cmd/goose@latest'" + return 1 +} + +# psql as master user. +psql_master() { + docker exec -i -e PGPASSWORD="${POSTGRES_PASSWORD}" "${CONTAINER_NAME}" \ + psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" "$@" + return $? +} + +# psql as app_user_ (limited role). +psql_appuser() { + docker exec -i -e PGPASSWORD="${APP_USER_PASSWORD}" "${CONTAINER_NAME}" \ + psql -U "${APP_USERNAME}" -d "${POSTGRES_DB}" "$@" + return $? +} + +# Run goose as the master user. search_path is already configured at the +# role level (ALTER ROLE ... SET search_path) in setup_schema_and_user, +# so goose will create its tables in the target schema automatically. 
+run_goose() { + local cmd="$1" + shift + GOOSE_DRIVER=postgres \ + GOOSE_DBSTRING="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@localhost:${POSTGRES_PORT}/${POSTGRES_DB}?sslmode=disable" \ + goose -dir "${MIGRATIONS_DIR}" "${cmd}" "$@" + return $? +} + +# Replicate setupSchemaAndUser from main.go: +# - CREATE SCHEMA IF NOT EXISTS +# - CREATE ROLE app_user_ LOGIN PASSWORD '...' +# - GRANT USAGE ON SCHEMA +# - GRANT SELECT/INSERT/UPDATE/DELETE ON ALL TABLES +# - GRANT USAGE/SELECT/UPDATE ON ALL SEQUENCES +# - ALTER DEFAULT PRIVILEGES (so future tables are also covered) +# - ALTER ROLE ... SET search_path TO +setup_schema_and_user() { + log_info "Setting up schema '${POSTGRES_SCHEMA}' and role '${APP_USERNAME}'..." + + psql_master -v ON_ERROR_STOP=1 <<-SQL + -- 1. Create schema + CREATE SCHEMA IF NOT EXISTS ${POSTGRES_SCHEMA}; + + -- 2. Create app_user role (idempotent) + DO \$\$ + BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = '${APP_USERNAME}') THEN + CREATE ROLE ${APP_USERNAME} LOGIN PASSWORD '${APP_USER_PASSWORD}'; + ELSE + ALTER ROLE ${APP_USERNAME} PASSWORD '${APP_USER_PASSWORD}'; + END IF; + END + \$\$; + + -- 3. Default search_path for this role (mirrors ALTER ROLE ... SET search_path in main.go) + ALTER ROLE ${APP_USERNAME} SET search_path TO ${POSTGRES_SCHEMA}; + + -- 4. Set master user search_path so goose creates tables in the target schema + ALTER ROLE ${POSTGRES_USER} SET search_path TO ${POSTGRES_SCHEMA}; + + -- 5. 
Schema-scoped grants + GRANT USAGE ON SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME}; + GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME}; + GRANT USAGE, SELECT, UPDATE ON ALL SEQUENCES IN SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME}; + ALTER DEFAULT PRIVILEGES IN SCHEMA ${POSTGRES_SCHEMA} + GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO ${APP_USERNAME}; + ALTER DEFAULT PRIVILEGES IN SCHEMA ${POSTGRES_SCHEMA} + GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO ${APP_USERNAME}; +SQL + + log_info "Schema '${POSTGRES_SCHEMA}' and role '${APP_USERNAME}' ready." + return 0 +} + +verify_tables_in_schema() { + log_info "=== Verifying tables exist in schema '${POSTGRES_SCHEMA}' (not public) ===" + + local table_count + table_count=$(psql_master -t -c \ + "SELECT count(*) FROM information_schema.tables WHERE table_schema = '${POSTGRES_SCHEMA}';" \ + | xargs) + + if [[ "${table_count}" -eq 0 ]]; then + log_error "No tables found in schema '${POSTGRES_SCHEMA}' after migration!" 
+ return 1 + fi + log_info "Found ${table_count} table(s) in schema '${POSTGRES_SCHEMA}'" + + log_info "Table list in '${POSTGRES_SCHEMA}':" + psql_master -c \ + "SELECT table_name FROM information_schema.tables WHERE table_schema = '${POSTGRES_SCHEMA}' ORDER BY table_name;" + + log_info "Goose version table in '${POSTGRES_SCHEMA}':" + psql_master -c "SELECT * FROM ${POSTGRES_SCHEMA}.goose_db_version ORDER BY id;" + return 0 +} + +verify_no_tables_in_public() { + log_info "=== Verifying no application tables leaked into 'public' schema ===" + + log_info "All schemas in database:" + psql_master -c \ + "SELECT schema_name FROM information_schema.schemata ORDER BY schema_name;" + + log_info "Tables in 'public' schema:" + psql_master -c \ + "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' ORDER BY table_name;" + + local public_tables + public_tables=$(psql_master -t -c \ + "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_name != 'goose_db_version';" \ + | xargs) + + if [[ -n "${public_tables}" ]]; then + log_warn "Unexpected tables found in public schema: ${public_tables}" + else + log_info "public schema is clean (no application tables)" + fi + return 0 +} + +verify_app_user_access() { + log_info "=== Verifying app_user access as '${APP_USERNAME}' ===" + + # app_user must be able to SELECT from tables in the schema + log_info " SELECT from test_type..." + psql_appuser -v ON_ERROR_STOP=1 -c "SELECT * FROM test_type LIMIT 1;" >/dev/null + + # app_user must be able to INSERT into test_type (seeded by migrations) + log_info " INSERT into test_type..." + psql_appuser -v ON_ERROR_STOP=1 -c \ + "INSERT INTO test_type (test_code, description) VALUES ('TEST_ACCESS_CHECK', 'app_user access verification') ON CONFLICT DO NOTHING;" >/dev/null + + # app_user must be able to DELETE what it just inserted + log_info " DELETE from test_type..." 
+ psql_appuser -v ON_ERROR_STOP=1 -c \ + "DELETE FROM test_type WHERE test_code = 'TEST_ACCESS_CHECK';" >/dev/null + + # app_user must NOT be able to create tables (not a schema owner) + log_info " Verify app_user cannot CREATE TABLE..." + if psql_appuser -c "CREATE TABLE ${POSTGRES_SCHEMA}.should_fail (id int);" 2>&1; then + log_error "app_user was unexpectedly able to CREATE TABLE — privileges are too broad!" + return 1 + else + log_info " Correctly denied: app_user cannot CREATE TABLE" + fi + + log_info "app_user access checks passed." + return 0 +} + +# ============================================================================== +# Main +# ============================================================================== + +main() { + trap cleanup EXIT + + log_info "=== Goose Migration Tests ===" + log_info "Migrations directory : ${MIGRATIONS_DIR}" + log_info "Target schema : ${POSTGRES_SCHEMA}" + log_info "App user : ${APP_USERNAME}" + + # Check prerequisites + if [[ ! -d "${MIGRATIONS_DIR}" ]]; then + log_error "Migrations directory not found: ${MIGRATIONS_DIR}" + exit 1 + fi + + ensure_goose + + # Start PostgreSQL container + log_info "Starting PostgreSQL container (${POSTGRES_IMAGE})..." 
+ docker run -d \ + --name "${CONTAINER_NAME}" \ + -e POSTGRES_USER="${POSTGRES_USER}" \ + -e POSTGRES_PASSWORD="${POSTGRES_PASSWORD}" \ + -e POSTGRES_DB="${POSTGRES_DB}" \ + -p "${POSTGRES_PORT}:5432" \ + "${POSTGRES_IMAGE}" + + wait_for_postgres + + # Step 1: Replicate setupSchemaAndUser (Lambda step 1) + setup_schema_and_user + + # Step 2: Validate migration files before running them + log_info "=== Validating Migrations ===" + run_goose validate + + # Step 3: Goose status before any migrations + log_info "=== Migration Status (Initial) ===" + run_goose status + + # Step 4: Run migrations with search_path= (Lambda step 2) + log_info "=== Running Migrations (Up) ===" + run_goose up + + log_info "=== Migration Status (After Up) ===" + run_goose status + + # Step 5: Verify tables landed in the right schema + verify_tables_in_schema + verify_no_tables_in_public + + # Step 6: Re-grant on existing tables (Lambda step 3 — covers tables created before DEFAULT PRIVILEGES) + log_info "=== Granting privileges on migrated tables to '${APP_USERNAME}' ===" + psql_master -v ON_ERROR_STOP=1 -c \ + "GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME};" + psql_master -v ON_ERROR_STOP=1 -c \ + "GRANT USAGE, SELECT, UPDATE ON ALL SEQUENCES IN SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME};" + + # Step 7: Verify app_user can perform DML but cannot create tables + verify_app_user_access + + # Step 8: Rollback and re-apply (idempotency) + log_info "=== Testing Rollback (Down) ===" + run_goose down + + log_info "=== Migration Status (After Down) ===" + run_goose status + + log_info "=== Testing Re-apply (Up again — idempotency) ===" + run_goose up + + # Step 9: Re-grant after re-apply (mirrors what the Lambda does on each invocation) + psql_master -v ON_ERROR_STOP=1 -c \ + "GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME};" >/dev/null + psql_master -v ON_ERROR_STOP=1 -c \ + "GRANT USAGE, 
SELECT, UPDATE ON ALL SEQUENCES IN SCHEMA ${POSTGRES_SCHEMA} TO ${APP_USERNAME};" >/dev/null + + log_info "=== Final Migration Status ===" + run_goose status + + verify_tables_in_schema + verify_app_user_access + + log_info "" + log_info "=== All migration tests passed! ===" + return 0 +} + +main "$@" diff --git a/lambdas/goose-migrator-lambda/src/go.mod b/lambdas/goose-migrator-lambda/src/go.mod new file mode 100644 index 000000000..c74af2cf8 --- /dev/null +++ b/lambdas/goose-migrator-lambda/src/go.mod @@ -0,0 +1,18 @@ +module goose-migrator-lambda + +go 1.26.2 + +require ( + github.com/aws/aws-lambda-go v1.44.0 + github.com/aws/aws-sdk-go v1.55.8 + github.com/lib/pq v1.10.9 + github.com/pressly/goose/v3 v3.27.0 +) + +require ( + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/sync v0.19.0 // indirect +) diff --git a/lambdas/goose-migrator-lambda/src/go.sum b/lambdas/goose-migrator-lambda/src/go.sum new file mode 100644 index 000000000..5b14379bc --- /dev/null +++ b/lambdas/goose-migrator-lambda/src/go.sum @@ -0,0 +1,55 @@ +github.com/aws/aws-lambda-go v1.44.0 h1:Xp9PANXKsSJ23IhE4ths592uWTCEewswPhSH9qpAuQQ= +github.com/aws/aws-lambda-go v1.44.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pressly/goose/v3 v3.27.0 h1:/D30gVTuQhu0WsNZYbJi4DMOsx1lNq+6SkLe+Wp59BM= +github.com/pressly/goose/v3 v3.27.0/go.mod h1:3ZBeCXqzkgIRvrEMDkYh1guvtoJTU5oMMuDdkutoM78= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod 
h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0= +golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/libc v1.68.0 h1:PJ5ikFOV5pwpW+VqCK1hKJuEWsonkIJhhIXyuF/91pQ= +modernc.org/libc v1.68.0/go.mod h1:NnKCYeoYgsEqnY3PgvNgAeaJnso968ygU8Z0DxjoEc0= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.46.1 h1:eFJ2ShBLIEnUWlLy12raN0Z1plqmFX9Qe3rjQTKt6sU= +modernc.org/sqlite v1.46.1/go.mod 
h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= diff --git a/lambdas/goose-migrator-lambda/src/main.go b/lambdas/goose-migrator-lambda/src/main.go new file mode 100644 index 000000000..e16a49e5d --- /dev/null +++ b/lambdas/goose-migrator-lambda/src/main.go @@ -0,0 +1,432 @@ +package main + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "net/url" + "os" + "regexp" + + "github.com/aws/aws-lambda-go/lambda" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/rds/rdsutils" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/lib/pq" + "github.com/pressly/goose/v3" +) + +// getDBPassword fetches the DB password from AWS Secrets Manager. +// secretId accepts either a secret ARN or a secret name. +func getDBPassword(secretId string) (string, error) { + sess, err := session.NewSession() + if err != nil { + return "", fmt.Errorf("failed to create AWS session: %w", err) + } + client := secretsmanager.New(sess) + input := &secretsmanager.GetSecretValueInput{ + SecretId: aws.String(secretId), + } + result, err := client.GetSecretValue(input) + if err != nil { + return "", fmt.Errorf("failed to get secret value for %s: %w", secretId, err) + } + var secretString string + if result.SecretString != nil { + secretString = *result.SecretString + } else { + return "", fmt.Errorf("secret value is binary, not supported") + } + // Assume the secret is a JSON with at least a "password" field + var secretMap map[string]string + if err := json.Unmarshal([]byte(secretString), &secretMap); err != nil { + return "", fmt.Errorf("failed to unmarshal secret JSON: %w", err) + } + password, ok := secretMap["password"] + if !ok { + return "", fmt.Errorf("password field not found in secret") + } + return password, nil +} + +// getIAMAuthToken generates a short-lived RDS IAM authentication token using the Lambda's +// execution role credentials. 
The token is valid for 15 minutes and used as the DB password. +func getIAMAuthToken(host, port, region, dbUser string) (string, error) { + sess, err := session.NewSession(&aws.Config{ + Region: aws.String(region), + }) + if err != nil { + return "", fmt.Errorf("failed to create AWS session: %w", err) + } + endpoint := fmt.Sprintf("%s:%s", host, port) + token, err := rdsutils.BuildAuthToken(endpoint, region, dbUser, sess.Config.Credentials) + if err != nil { + return "", fmt.Errorf("failed to build IAM auth token: %w", err) + } + return token, nil +} + +// buildPostgresURL constructs the PostgreSQL connection URL from environment variables. +// When USE_IAM_AUTH=true, an IAM authentication token is used instead of a static password. +// The Lambda's execution role must have the rds-db:connect IAM permission. +func buildPostgresURL() (string, error) { + user := os.Getenv("DB_USERNAME") + host := os.Getenv("DB_ADDRESS") + port := os.Getenv("DB_PORT") + dbname := os.Getenv("DB_NAME") + useIAMAuth := os.Getenv("USE_IAM_AUTH") == "true" + + if user == "" || host == "" || port == "" || dbname == "" { + return "", fmt.Errorf("missing one or more required environment variables: DB_USERNAME, DB_ADDRESS, DB_PORT, DB_NAME") + } + + var password string + if useIAMAuth { + region := os.Getenv("DB_REGION") + if region == "" { + return "", fmt.Errorf("DB_REGION is required when USE_IAM_AUTH is true") + } + log.Printf("Using IAM authentication for DB connection (region: %s, user: %s)", region, user) + token, err := getIAMAuthToken(host, port, region, user) + if err != nil { + return "", fmt.Errorf("failed to generate IAM auth token: %w", err) + } + password = token + } else { + secretArn := os.Getenv("DB_SECRET_ARN") + if secretArn == "" { + return "", fmt.Errorf("DB_SECRET_ARN is required when USE_IAM_AUTH is false") + } + var err error + password, err = getDBPassword(secretArn) + if err != nil { + return "", fmt.Errorf("failed to retrieve DB password: %w", err) + } + } + + // 
Build URL using url.UserPassword so userinfo is correctly percent-encoded + // (url.QueryEscape uses '+' for spaces which is invalid in URL userinfo) + u := &url.URL{ + Scheme: "postgres", + User: url.UserPassword(user, password), + Host: fmt.Sprintf("%s:%s", host, port), + Path: "/" + dbname, + } + q := url.Values{} + q.Set("sslmode", "require") + + // When DB_SCHEMA is set, include search_path in the connection URL so that + // every connection obtained from the *sql.DB pool uses the correct schema. + // lib/pq treats unknown DSN parameters as SET key=value session variables. + schema := os.Getenv("DB_SCHEMA") + if schema != "" && schema != "public" { + q.Set("search_path", schema) + } + u.RawQuery = q.Encode() + + return u.String(), nil +} + +// setupSchemaAndUser creates the schema if it doesn't exist and ensures app_user role +// has appropriate access scoped to that schema only. +// The password is read from the Terraform-managed Secrets Manager secret. +// When grantRdsIam is true, the rds_iam role is granted (IAM auth); when false, +// rds_iam is revoked so the user falls back to password authentication. 
+func setupSchemaAndUser(db *sql.DB, schema, appUserSecretName string, grantRdsIam bool) error { + appUsername := fmt.Sprintf("app_user_%s", schema) + + // Quote identifiers to prevent SQL injection (schema/role names cannot be parameterized) + quotedSchema := pq.QuoteIdentifier(schema) + quotedUser := pq.QuoteIdentifier(appUsername) + + log.Printf("Setting up schema '%s' and user '%s'...", schema, appUsername) + + // Read the password from the Terraform-managed Secrets Manager secret + password, err := getDBPassword(appUserSecretName) + if err != nil { + return fmt.Errorf("failed to read app user password from secret %s: %w", appUserSecretName, err) + } + + // Create schema if not exists + // SQL injection safe: quotedSchema uses pq.QuoteIdentifier() + if _, err := db.Exec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", quotedSchema)); err != nil { // NOSONAR + return fmt.Errorf("failed to create schema %s: %w", schema, err) + } + log.Printf("Schema '%s' ensured", schema) + + // Check if role exists + var roleExists bool + err = db.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = $1)", appUsername).Scan(&roleExists) + if err != nil { + return fmt.Errorf("failed to check if role %s exists: %w", appUsername, err) + } + + // Use QuoteLiteral for password to safely escape special characters + quotedPassword := pq.QuoteLiteral(password) + + if !roleExists { + // Create the role with the password from Secrets Manager + // SQL injection safe: quotedUser uses pq.QuoteIdentifier(), quotedPassword uses pq.QuoteLiteral() + if _, err := db.Exec(fmt.Sprintf("CREATE ROLE %s LOGIN PASSWORD %s", quotedUser, quotedPassword)); err != nil { // NOSONAR + return fmt.Errorf("failed to create role %s: %w", appUsername, err) + } + log.Printf("Created role '%s'", appUsername) + } else { + // Sync password with the Terraform-managed secret (supports rotation) + // SQL injection safe: quotedUser uses pq.QuoteIdentifier(), quotedPassword uses pq.QuoteLiteral() + if _, 
err := db.Exec(fmt.Sprintf("ALTER ROLE %s PASSWORD %s", quotedUser, quotedPassword)); err != nil { // NOSONAR + return fmt.Errorf("failed to update password for role %s: %w", appUsername, err) + } + log.Printf("Synced password for existing role '%s' from Secrets Manager", appUsername) + } + + // Set default search_path for the role so it always uses the correct schema + // SQL injection safe: quotedUser and quotedSchema use pq.QuoteIdentifier() + if _, err := db.Exec(fmt.Sprintf("ALTER ROLE %s SET search_path TO %s", quotedUser, quotedSchema)); err != nil { // NOSONAR + return fmt.Errorf("failed to set search_path for %s: %w", appUsername, err) + } + + // Grant or revoke rds_iam based on whether IAM auth is desired. + // When rds_iam is granted, the user authenticates via PAM (IAM tokens only). + // When revoked, the user falls back to password auth (md5/scram). + // This is a no-op if the rds_iam role does not exist (non-Aurora environments). + var rdsIamExists bool + if err := db.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = 'rds_iam')").Scan(&rdsIamExists); err != nil { + return fmt.Errorf("failed to check for rds_iam role: %w", err) + } + if rdsIamExists { + if grantRdsIam { + // SQL injection safe: quotedUser uses pq.QuoteIdentifier() + if _, err := db.Exec(fmt.Sprintf("GRANT rds_iam TO %s", quotedUser)); err != nil { // NOSONAR + return fmt.Errorf("failed to grant rds_iam to %s: %w", appUsername, err) + } + log.Printf("Granted rds_iam to '%s' for IAM authentication support", appUsername) + } else { + // Check if user currently has rds_iam and revoke it + var hasRdsIam bool + if err := db.QueryRow( + "SELECT EXISTS(SELECT 1 FROM pg_auth_members WHERE roleid = (SELECT oid FROM pg_roles WHERE rolname = 'rds_iam') AND member = (SELECT oid FROM pg_roles WHERE rolname = $1))", + appUsername, + ).Scan(&hasRdsIam); err != nil { + return fmt.Errorf("failed to check rds_iam membership for %s: %w", appUsername, err) + } + if hasRdsIam { + // 
SQL injection safe: quotedUser uses pq.QuoteIdentifier() + if _, err := db.Exec(fmt.Sprintf("REVOKE rds_iam FROM %s", quotedUser)); err != nil { // NOSONAR + return fmt.Errorf("failed to revoke rds_iam from %s: %w", appUsername, err) + } + log.Printf("Revoked rds_iam from '%s' — user will use password authentication", appUsername) + } else { + log.Printf("rds_iam not granted to '%s', no revocation needed", appUsername) + } + } + } + + // Grant schema usage and DML privileges + // SQL injection safe: quotedSchema and quotedUser use pq.QuoteIdentifier() + grants := []string{ + fmt.Sprintf("GRANT USAGE ON SCHEMA %s TO %s", quotedSchema, quotedUser), + fmt.Sprintf("GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA %s TO %s", quotedSchema, quotedUser), + fmt.Sprintf("GRANT USAGE, SELECT, UPDATE ON ALL SEQUENCES IN SCHEMA %s TO %s", quotedSchema, quotedUser), + fmt.Sprintf("ALTER DEFAULT PRIVILEGES IN SCHEMA %s GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO %s", quotedSchema, quotedUser), + fmt.Sprintf("ALTER DEFAULT PRIVILEGES IN SCHEMA %s GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO %s", quotedSchema, quotedUser), + } + + for _, grant := range grants { + if _, err := db.Exec(grant); err != nil { // NOSONAR - grant built with pq.QuoteIdentifier() + return fmt.Errorf("failed to execute grant '%s': %w", grant, err) + } + } + + log.Printf("Granted schema-scoped privileges to '%s' on schema '%s'", appUsername, schema) + return nil +} + +// Response struct +type Response struct { + Message string `json:"message"` +} + +// Event struct for Lambda invocation payload +type Event struct { + Action string `json:"action"` // "migrate" (default) or "teardown" +} + +// teardownSchemaAndUser drops the schema and its app_user role. +// This is called during environment destruction to clean up database resources. 
+func teardownSchemaAndUser(db *sql.DB, schema string) error { + appUsername := fmt.Sprintf("app_user_%s", schema) + + // Quote identifiers to prevent SQL injection + quotedSchema := pq.QuoteIdentifier(schema) + quotedUser := pq.QuoteIdentifier(appUsername) + + log.Printf("Tearing down schema '%s' and user '%s'...", schema, appUsername) + + // Revoke all privileges and drop schema (CASCADE drops all objects in the schema) + // SQL injection safe: quotedSchema uses pq.QuoteIdentifier() + teardownSQL := []string{ + fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", quotedSchema), + } + + for _, stmt := range teardownSQL { + log.Printf("Executing: %s", stmt) + if _, err := db.Exec(stmt); err != nil { // NOSONAR - stmt built with pq.QuoteIdentifier() + return fmt.Errorf("failed to execute '%s': %w", stmt, err) + } + } + log.Printf("Dropped schema '%s'", schema) + + // Revoke all remaining privileges and drop the role + var roleExists bool + err := db.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = $1)", appUsername).Scan(&roleExists) + if err != nil { + return fmt.Errorf("failed to check if role %s exists: %w", appUsername, err) + } + + if roleExists { + // Aurora's master user (rds_superuser) is not a true PostgreSQL superuser. + // REASSIGN OWNED BY requires membership in the source role, so we grant + // the app_user role to the current (master) user first. 
+ // SQL injection safe: quotedUser uses pq.QuoteIdentifier() + revokeSQL := []string{ + fmt.Sprintf("GRANT %s TO CURRENT_USER", quotedUser), + fmt.Sprintf("REASSIGN OWNED BY %s TO CURRENT_USER", quotedUser), + fmt.Sprintf("DROP OWNED BY %s", quotedUser), + fmt.Sprintf("DROP ROLE %s", quotedUser), + } + for _, stmt := range revokeSQL { + log.Printf("Executing: %s", stmt) + if _, err := db.Exec(stmt); err != nil { // NOSONAR - stmt built with pq.QuoteIdentifier() + return fmt.Errorf("failed to execute '%s': %w", stmt, err) + } + } + log.Printf("Dropped role '%s'", appUsername) + } else { + log.Printf("Role '%s' does not exist, nothing to drop", appUsername) + } + + return nil +} + +// HandleRequest is the handler function for the Lambda function +func HandleRequest(ctx context.Context, event Event) (Response, error) { + action := event.Action + if action == "" { + action = "migrate" + } + + log.Printf("Starting Goose migration Lambda handler (action: %s)", action) + + schema := os.Getenv("DB_SCHEMA") + appUserSecretName := os.Getenv("APP_USER_SECRET_NAME") + + if schema == "" { + schema = "public" + log.Println("DB_SCHEMA not set, defaulting to 'public'") + } + + dbURL, err := buildPostgresURL() + if err != nil { + log.Printf("Failed to build DB URL: %s", redactPassword(err.Error())) + return Response{"Failed to build DB URL"}, err + } + + // Redact password in log output + log.Printf("Connecting to DB: %s", redactPassword(dbURL)) + db, err := sql.Open("postgres", dbURL) + if err != nil { + log.Printf("Failed to connect to DB: %s", redactPassword(err.Error())) + return Response{"Failed to connect to DB"}, err + } + defer db.Close() + + // sql.Open does not establish a connection — verify connectivity eagerly so + // failures surface here with a clear error rather than deep inside goose. 
+ if err := db.PingContext(ctx); err != nil { + log.Printf("Failed to ping DB: %s", redactPassword(err.Error())) + return Response{"Failed to connect to DB"}, err + } + + // Handle teardown action — drops schema and user for environment cleanup + if action == "teardown" { + if schema == "public" { + return Response{"Cannot teardown public schema"}, fmt.Errorf("cannot teardown public schema") + } + if err := teardownSchemaAndUser(db, schema); err != nil { + log.Printf("Failed to teardown schema and user: %s", err.Error()) + return Response{"Failed to teardown schema and user"}, err + } + log.Printf("Teardown successful (schema: %s)", schema) + return Response{fmt.Sprintf("Teardown successful (schema: %s)", schema)}, nil + } + + // --- Migrate action (default) --- + + // GRANT_RDS_IAM controls whether the app user gets the rds_iam role (for IAM auth). + // When false (or unset), the user uses password authentication. + grantRdsIam := os.Getenv("GRANT_RDS_IAM") == "true" + + // Step 1: Create schema and app_user (runs as master user) + if schema != "public" { + if appUserSecretName == "" { + return Response{"APP_USER_SECRET_NAME is required when DB_SCHEMA is set"}, fmt.Errorf("APP_USER_SECRET_NAME is required when DB_SCHEMA is set") + } + if err := setupSchemaAndUser(db, schema, appUserSecretName, grantRdsIam); err != nil { + log.Printf("Failed to setup schema and user: %s", err.Error()) + return Response{"Failed to setup schema and user"}, err + } + } + + // Step 2: Run goose migrations. + // search_path is set via the connection URL (see buildPostgresURL), so every + // connection from the pool automatically targets the correct schema. + // SetDialect must be called when using sql.Open directly (as opposed to + // goose.OpenDBWithDriver, which infers the dialect from the driver name). 
+ if err := goose.SetDialect("postgres"); err != nil { + log.Printf("Failed to set goose dialect: %s", err.Error()) + return Response{"Failed to set goose dialect"}, err + } + log.Println("Running goose.Up migrations...") + if err := goose.Up(db, "migrations"); err != nil { + log.Printf("Migration failed: %s", redactPassword(err.Error())) + return Response{"Migration failed"}, err + } + + // Step 3: Re-grant DML privileges on tables/sequences created by the migrations. + // ALTER DEFAULT PRIVILEGES (set in setupSchemaAndUser) covers future objects, + // but an explicit re-grant ensures privileges are correct even if the default + // privileges were modified or if migrations ran before they were set. + if schema != "public" { + appUsername := fmt.Sprintf("app_user_%s", schema) + quotedSchema := pq.QuoteIdentifier(schema) + quotedUser := pq.QuoteIdentifier(appUsername) + log.Printf("Re-granting privileges on migrated objects to '%s'...", appUsername) + // SQL injection safe: quotedSchema and quotedUser use pq.QuoteIdentifier() + regrants := []string{ + fmt.Sprintf("GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA %s TO %s", quotedSchema, quotedUser), + fmt.Sprintf("GRANT USAGE, SELECT, UPDATE ON ALL SEQUENCES IN SCHEMA %s TO %s", quotedSchema, quotedUser), + } + for _, g := range regrants { + if _, err := db.Exec(g); err != nil { // NOSONAR - g built with pq.QuoteIdentifier() + log.Printf("Failed to execute re-grant '%s': %s", g, err.Error()) + return Response{"Failed to re-grant privileges after migration"}, err + } + } + } + + log.Printf("Migration successful (schema: %s)", schema) + return Response{fmt.Sprintf("Migration successful (schema: %s)", schema)}, nil +} + +// redactPassword redacts the password in a Postgres connection URL for logging +func redactPassword(url string) string { + return regexp.MustCompile(`:[^:@/]+@`).ReplaceAllString(url, ":[REDACTED]@") +} + +func main() { + lambda.Start(HandleRequest) +} diff --git 
a/local-environment/docker-compose.yml b/local-environment/docker-compose.yml index 4b423ce32..202eb666c 100644 --- a/local-environment/docker-compose.yml +++ b/local-environment/docker-compose.yml @@ -64,20 +64,6 @@ services: - ./wiremock/__files:/home/wiremock/__files command: ["--verbose"] - db-migrate: - build: - context: ./scripts/database - dockerfile: Dockerfile - image: hometest-db-migrate:local - container_name: db-migrate - depends_on: - postgres-db: - condition: service_healthy - profiles: - - backend - volumes: - - ../database:/docker-entrypoint-initdb.d:ro - volumes: ui_node_modules: postgres_data: diff --git a/local-environment/scripts/database/Dockerfile b/local-environment/scripts/database/Dockerfile deleted file mode 100644 index 250db83ea..000000000 --- a/local-environment/scripts/database/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM kukymbr/goose-docker:3.27.0 AS goose-builder - -# RUN go install github.com/pressly/goose/v3/cmd/goose@v3.27.0 - -FROM postgres:17.9 - -COPY --from=goose-builder /bin/goose /usr/local/bin/goose -COPY db-migrate.sh /usr/local/bin/db-migrate.sh - -RUN chmod +x /usr/local/bin/goose /usr/local/bin/db-migrate.sh - -ENTRYPOINT ["bash", "/usr/local/bin/db-migrate.sh"] diff --git a/local-environment/scripts/database/db-migrate.sh b/local-environment/scripts/database/db-migrate.sh old mode 100644 new mode 100755 index 085e203af..4bcd065db --- a/local-environment/scripts/database/db-migrate.sh +++ b/local-environment/scripts/database/db-migrate.sh @@ -1,44 +1,134 @@ -#!/bin/bash -set -e +#!/usr/bin/env bash +# ----------------------------------------------------------------------------- +# Local database migration script. +# +# psql operations run inside the postgres-db container via docker exec - +# no local psql installation required. The mise-managed goose CLI runs on +# the host and connects to the container through the mapped port. 
+# +# Two-user pattern (mirrors production IAM separation): +# admin - superuser; runs 01-init.sql to create schema/roles +# app_migrator - schema owner; runs goose migrations and seed data so +# all created objects are owned by the correct role +# +# Execution order: +# 1. Init - schema, roles, extensions (01-init.sql as admin) +# 2. Goose - validate + apply pending migrations (as app_migrator) +# 3. Seed - local test data with wiremock URLs (as app_migrator) +# +# SQL files are streamed via stdin rather than -f because docker exec runs +# inside the container and cannot reference paths on the host filesystem. +# +# Usage: +# pnpm run local:service:db:migrate +# +# Prerequisites: +# - postgres-db container running: COMPOSE_PROFILES=backend pnpm run local:compose:up +# - goose on PATH via mise (pinned in .mise.toml) +# ----------------------------------------------------------------------------- + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)" + +CONTAINER="postgres-db" +DB_HOST="${POSTGRES_HOST:-localhost}" +DB_PORT="${POSTGRES_PORT:-5432}" +DB_NAME="${POSTGRES_DB:-local_hometest_db}" + +# Schema is fixed for local development and matches the hardcoded value in +# database/01-init.sql. The schema-per-environment pattern would usually be +# handled by the goose-migrator lambda when running in deployed envs. 
+DB_SCHEMA="hometest" -# Configuration variables -DB_HOST="postgres-db" ADMIN_USER="admin" ADMIN_PASSWORD="admin" + MIGRATOR_USER="app_migrator" MIGRATOR_PASSWORD="STRONG_PASSWORD_MIGRATOR" -LOCAL_DB="local_hometest_db" -SQL_DIR="/docker-entrypoint-initdb.d" -PSQL_OPTIONS="-v ON_ERROR_STOP=1" -DB_URL="postgresql://${MIGRATOR_USER}:${MIGRATOR_PASSWORD}@${DB_HOST}:5432/${LOCAL_DB}" -export PGHOST="$DB_HOST" +MIGRATIONS_DIR="${REPO_ROOT}/lambdas/goose-migrator-lambda/migrations" +DATABASE_DIR="${REPO_ROOT}/database" + +# search_path is set in the connection string so every goose connection +# automatically targets the correct schema without needing SET search_path. +GOOSE_DBSTRING="postgres://${MIGRATOR_USER}:${MIGRATOR_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}?sslmode=disable&search_path=${DB_SCHEMA}" + +# Colors +GREEN='\033[0;32m' +RED='\033[0;31m' +NC='\033[0m' + +log_info() { + echo -e "${GREEN}[INFO]${NC} $*" + return 0 +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $*" >&2 + return 0 +} + +# Run psql as admin inside the container. Credentials have full DDL access. +psql_admin() { + docker exec -i -e PGPASSWORD="${ADMIN_PASSWORD}" "${CONTAINER}" \ + psql -U "${ADMIN_USER}" -d "${DB_NAME}" "$@" + return $? +} + +# Run psql as app_migrator inside the container. This role owns the schema +# so objects created here (tables, sequences) have the correct ownership. +psql_migrator() { + docker exec -i -e PGPASSWORD="${MIGRATOR_PASSWORD}" "${CONTAINER}" \ + psql -U "${MIGRATOR_USER}" -d "${DB_NAME}" "$@" + return $? +} + +# --------------------------------------------------------------------------- +# Wait for PostgreSQL +# --------------------------------------------------------------------------- + +wait_for_postgres() { + local max_attempts=30 + local attempt=1 -echo "Starting database migration..." + log_info "Waiting for PostgreSQL to be ready..." 
+ # Use a real query rather than pg_isready: pg_isready only checks the + # network socket and may succeed before the server accepts connections. + until docker exec -e PGPASSWORD="${ADMIN_PASSWORD}" "${CONTAINER}" \ + psql -U "${ADMIN_USER}" -d "${DB_NAME}" -c "SELECT 1" >/dev/null 2>&1; do + if [[ ${attempt} -ge ${max_attempts} ]]; then + log_error "PostgreSQL did not become ready after ${max_attempts} attempts" + return 1 + fi + echo -n "." + sleep 1 + ((attempt++)) + done + echo "" + log_info "PostgreSQL is ready." + return 0 +} -# Admin user operations -export PGPASSWORD="$ADMIN_PASSWORD" -export PGUSER="$ADMIN_USER" +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- -# echo "Step 0: Dropping existing database..." -# psql $PSQL_OPTIONS -d "postgres" -f "$SQL_DIR/00-delete.sql" +wait_for_postgres -echo -echo "Step 1: Initializing database..." -psql $PSQL_OPTIONS -d "$LOCAL_DB" -f "$SQL_DIR/01-init.sql" +log_info "=== Step 1: Initialising schema and roles ===" +psql_admin -v ON_ERROR_STOP=1 < "${DATABASE_DIR}/01-init.sql" -# Migrator user operations -export PGPASSWORD="$MIGRATOR_PASSWORD" -export PGUSER="$MIGRATOR_USER" +log_info "=== Step 2: Validating goose migrations ===" +GOOSE_DRIVER=postgres GOOSE_DBSTRING="${GOOSE_DBSTRING}" goose -dir "${MIGRATIONS_DIR}" validate -echo -echo "Step 2: Validating goose migrations..." -goose -dir "$SQL_DIR/migrations" validate +log_info "=== Step 3: Running goose migrations ===" +GOOSE_DRIVER=postgres GOOSE_DBSTRING="${GOOSE_DBSTRING}" goose -dir "${MIGRATIONS_DIR}" up -echo -echo "Step 3: Running goose migrations..." -goose -dir "$SQL_DIR/migrations" postgres "$DB_URL" up +log_info "=== Step 4: Loading seed data ===" +# Seed data runs as app_migrator (schema owner) and sets supplier service_url +# to http://wiremock:8080 so local requests are intercepted by WireMock. 
+psql_migrator -v ON_ERROR_STOP=1 < "${DATABASE_DIR}/03-seed-hometest-data.sql" -echo -echo "Step 4: Loading seed data..." -psql $PSQL_OPTIONS -d "$LOCAL_DB" -f "$SQL_DIR/03-seed-hometest-data.sql" +log_info "=== Database migration and seeding complete ===" diff --git a/mise.lock b/mise.lock index 9164174ed..2aa26e4e4 100644 --- a/mise.lock +++ b/mise.lock @@ -55,6 +55,42 @@ url = "https://github.com/gitleaks/gitleaks/releases/download/v8.30.1/gitleaks_8 checksum = "sha256:d29144deff3a68aa93ced33dddf84b7fdc26070add4aa0f4513094c8332afc4e" url = "https://github.com/gitleaks/gitleaks/releases/download/v8.30.1/gitleaks_8.30.1_windows_x64.zip" +[[tools.go]] +version = "1.26.2" +backend = "core:go" + +[tools.go."platforms.linux-arm64"] +checksum = "sha256:c958a1fe1b361391db163a485e21f5f228142d6f8b584f6bef89b26f66dc5b23" +url = "https://dl.google.com/go/go1.26.2.linux-arm64.tar.gz" + +[tools.go."platforms.linux-arm64-musl"] +checksum = "sha256:c958a1fe1b361391db163a485e21f5f228142d6f8b584f6bef89b26f66dc5b23" +url = "https://dl.google.com/go/go1.26.2.linux-arm64.tar.gz" + +[tools.go."platforms.linux-x64"] +checksum = "sha256:990e6b4bbba816dc3ee129eaeaf4b42f17c2800b88a2166c265ac1a200262282" +url = "https://dl.google.com/go/go1.26.2.linux-amd64.tar.gz" + +[tools.go."platforms.linux-x64-musl"] +checksum = "sha256:990e6b4bbba816dc3ee129eaeaf4b42f17c2800b88a2166c265ac1a200262282" +url = "https://dl.google.com/go/go1.26.2.linux-amd64.tar.gz" + +[tools.go."platforms.macos-arm64"] +checksum = "sha256:32af1522bf3e3ff3975864780a429cc0b41d190ec7bf90faa661d6d64566e7af" +url = "https://dl.google.com/go/go1.26.2.darwin-arm64.tar.gz" + +[tools.go."platforms.macos-x64"] +checksum = "sha256:bc3f1500d9968c36d705442d90ba91addf9271665033748b82532682e90a7966" +url = "https://dl.google.com/go/go1.26.2.darwin-amd64.tar.gz" + +[tools.go."platforms.windows-x64"] +checksum = "sha256:98eb3570bade15cb826b0909338df6cc6d2cf590bc39c471142002db3832b708" +url = 
"https://dl.google.com/go/go1.26.2.windows-amd64.zip" + +[[tools."go:github.com/pressly/goose/v3/cmd/goose"]] +version = "3.27.0" +backend = "go:github.com/pressly/goose/v3/cmd/goose" + [[tools.grype]] version = "v0.111.0" backend = "aqua:anchore/grype" diff --git a/package.json b/package.json index 7882566f1..e82177e7d 100644 --- a/package.json +++ b/package.json @@ -26,7 +26,7 @@ "local:service:ui:stop": "pnpm run local:compose:down -- ui", "local:service:db:start": "pnpm run local:compose:up -- postgres-db", "local:service:db:stop": "pnpm run local:compose:down -- postgres-db", - "local:service:db:migrate": "pnpm run local:compose -- --profile backend up --build db-migrate", + "local:service:db:migrate": "bash local-environment/scripts/database/db-migrate.sh", "local:service:localstack:start": "pnpm run local:compose:up -- localstack", "local:service:localstack:stop": "pnpm run local:compose:down -- localstack", "local:terraform": "terraform -chdir=local-environment/infra", diff --git a/sonar-project.properties b/sonar-project.properties index 4ff227241..c45e0649d 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -23,7 +23,8 @@ sonar.exclusions=\ **/*.md,\ ui/content/content.json,\ lambdas/scripts/**,\ - lambdas/jest/** + lambdas/jest/**,\ + lambdas/goose-migrator-lambda/src/main.go sonar.typescript.tsconfigPaths=lambdas/tsconfig.json,ui/tsconfig.json,tests/tsconfig.json sonar.javascript.environments=node,browser diff --git a/tests/test-data/OrderTestData.ts b/tests/test-data/OrderTestData.ts index a310fe648..4b2b03575 100644 --- a/tests/test-data/OrderTestData.ts +++ b/tests/test-data/OrderTestData.ts @@ -29,7 +29,8 @@ export interface OrderPayload { } export class OrderTestData { - static readonly PREVENTX_SUPPLIER_ID = "c1a2b3c4-1234-4def-8abc-123456789abc"; + // UUID from goose migration 000002_seed_home_test_data.sql + static readonly PREVENTX_SUPPLIER_ID = "11111111-1111-4111-8111-111111111111"; static readonly PREVENTX_SUPPLIER_NAME 
= "Preventx"; static readonly defaultOrder: OrderPayload = {