diff --git a/.github/workflows/stage-2-test.yaml b/.github/workflows/stage-2-test.yaml index 37148449..839d633d 100644 --- a/.github/workflows/stage-2-test.yaml +++ b/.github/workflows/stage-2-test.yaml @@ -126,6 +126,16 @@ jobs: - name: "Run linting" run: | make test-lint + test-lua-lint: + name: "Lua linting" + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - name: "Checkout code" + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: "Run luacheck" + run: | + make test-lua-lint test-typecheck: name: "Typecheck" runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index e97bd341..d1b1e760 100644 --- a/.gitignore +++ b/.gitignore @@ -12,7 +12,7 @@ version.json # Please, add your custom content below! -# dependencies +# Dependencies node_modules .node-version */node_modules @@ -22,3 +22,4 @@ node_modules dist .DS_Store .reports +*~ diff --git a/.luarc.json b/.luarc.json new file mode 100644 index 00000000..09ef6a0c --- /dev/null +++ b/.luarc.json @@ -0,0 +1,12 @@ +{ + "diagnostics": { + "globals": [ + "KEYS", + "ARGV", + "redis", + "cjson", + "cmsgpack", + "bit" + ] + } +} diff --git a/AGENTS.md b/AGENTS.md index 982ca631..0ef373b1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -23,25 +23,25 @@ Agents should look for a nested `AGENTS.md` in or near these areas before making ## Root package.json – role and usage -The root `package.json` is the orchestration manifestgit co for this repo. It does not ship application code; it wires up shared dev tooling and delegates to workspace-level projects. +The root `package.json` is the orchestration manifest for this repo. It does not ship application code; it wires up shared dev tooling and delegates to workspace-level projects. -- Workspaces: Declares the set of npm workspaces (e.g. under `lambdas/`, `utils/`, `tests/`, `scripts/`). Agents should add a new workspace path here when introducing a new npm project. 
-- Scripts: Provides top-level commands that fan out across workspaces using `--workspaces` (lint, typecheck, unit tests) and project-specific runners (e.g. `lambda-build`). +- Workspaces: Declares the set of pnpm workspaces (e.g. under `lambdas/`, `utils/`, `tests/`, `scripts/`). Agents should add a new workspace path here when introducing a new pnpm project. +- Scripts: Provides top-level commands that fan out across workspaces using `pnpm -r` (lint, typecheck, unit tests) and project-specific runners (e.g. `lambda-build`). - Dev tool dependencies: Centralises Jest, TypeScript, ESLint configurations and plugins to keep versions consistent across workspaces. Workspace projects should rely on these unless a local override is strictly needed. - Overrides/resolutions: Pins transitive dependencies (e.g. Jest/react-is) to avoid ecosystem conflicts. Agents must not remove overrides without verifying tests across all workspaces. Agent guidance: -- Before adding or removing a workspace, update the root `workspaces` array and ensure CI scripts still succeed with `npm run lint`, `npm run typecheck`, and `npm run test:unit` at the repo root. -- When adding repo-wide scripts, keep names consistent with existing patterns (e.g. `lint`, `lint:fix`, `typecheck`, `test:unit`, `lambda-build`) and prefer `--workspaces` fan-out. +- Before adding or removing a workspace, update the root `workspaces` array and ensure CI scripts still succeed with `pnpm run lint`, `pnpm run typecheck`, and `pnpm run test:unit` at the repo root. +- When adding repo-wide scripts, keep names consistent with existing patterns (e.g. `lint`, `lint:fix`, `typecheck`, `test:unit`, `lambda-build`) and prefer `pnpm -r` fan-out. - Do not publish from the root. If adding a new workspace intended for publication, mark that workspace package as `private: false` and keep the root as private. - Validate changes by running the repo pre-commit hooks: `make githooks-run`. 
Success criteria for changes affecting the root `package.json`: -- `npm run lint`, `npm run typecheck`, and `npm run test:unit` pass at the repo root. -- Workspace discovery is correct (new projects appear under `npm run typecheck --workspaces`). -- No regression in lambda build tooling (`npm run lambda-build`). +- `pnpm run lint`, `pnpm run typecheck`, and `pnpm run test:unit` pass at the repo root. +- Workspace discovery is correct (new projects appear under `pnpm run typecheck -r`). +- No regression in lambda build tooling (`pnpm run lambda-build`). ## What Agents Can / Can’t Do @@ -81,7 +81,7 @@ When proposing a change, agents should: to catch formatting and basic lint issues. Domain specific checks will be defined in appropriate nested AGENTS.md files. -- Suggest at least one extra validation step (for example `npm test:unit` in a lambda, or triggering a specific workflow). +- Suggest at least one extra validation step (for example `pnpm run test:unit` in a lambda, or triggering a specific workflow). - Any required follow up activites which fall outside of the current task's scope should be clearly marked with a 'TODO: CCM-12345' comment. The human user should be prompted to create and provide a JIRA ticket ID to be added to the comment. 
## Security & Safety diff --git a/README.md b/README.md index 08fda19f..44026da2 100644 --- a/README.md +++ b/README.md @@ -102,7 +102,7 @@ make config Run unit tests for Lambda functions: ```shell -npm test +pnpm test:unit ``` ## Infrastructure diff --git a/docs/Makefile b/docs/Makefile index ea4bc005..a7854d6a 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -20,10 +20,10 @@ $(if $(BASE_URL),-- --baseurl $(BASE_URL),-- --baseurl "") endef build: version - npm run build $(baseurlparam) + pnpm run build $(baseurlparam) debug: version - npm run debug + pnpm run debug version: touch _config.version.yml diff --git a/docs/test-standards.md b/docs/test-standards.md index c8ab86cf..4fef9318 100644 --- a/docs/test-standards.md +++ b/docs/test-standards.md @@ -104,7 +104,7 @@ AI must: - Verify mock return types match the actual function return types. 7. **The "Test Execution" Mandate**: - - After creating or modifying a test, you MUST run it using the repo's test command - e.g. npm run test:unit + - After creating or modifying a test, you MUST run it using the repo's test command - e.g. pnpm run test:unit - If the test fails due to incorrect imports, paths, or signatures, fix and re-run. - Only report completion when the test passes (exit code 0) and test coverage checks also pass. - See section 6.2 for the full self-correction loop requirements. @@ -192,7 +192,7 @@ AI must: When AI changes tests, it must: -- run all the tests in the npm workspace. +- run all the tests in the pnpm workspace. - report exactly what it ran and whether it passed. 
### 6.2 AI Self-Correction Loop diff --git a/eslint.config.mjs b/eslint.config.mjs index eb59432b..9ea6c3e0 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -28,6 +28,7 @@ export default defineConfig([ "**/test-results", "**/playwright-report*", "eslint.config.mjs", + "**/lua-transform.js", ]), //imports @@ -200,7 +201,7 @@ export default defineConfig([ }, }, { - files: ["**/utils/**", "tests/test-team/**", "tests/performance/helpers/**", "lambdas/**/src/**"], + files: ["**/utils/**", "tests/test-team/**", "tests/performance/helpers/**", "lambdas/**/src/**", "src/**/src/**"], rules: { "import-x/prefer-default-export": 0, }, diff --git a/infrastructure/terraform/components/callbacks/README.md b/infrastructure/terraform/components/callbacks/README.md index b1587725..02804698 100644 --- a/infrastructure/terraform/components/callbacks/README.md +++ b/infrastructure/terraform/components/callbacks/README.md @@ -8,7 +8,9 @@ |------|---------| | [terraform](#requirement\_terraform) | >= 1.10.1 | | [aws](#requirement\_aws) | 6.13 | +| [external](#requirement\_external) | ~> 2.0 | | [random](#requirement\_random) | ~> 3.0 | +| [tls](#requirement\_tls) | ~> 4.0 | ## Inputs | Name | Description | Type | Default | Required | @@ -18,6 +20,8 @@ | [component](#input\_component) | The variable encapsulating the name of this component | `string` | `"callbacks"` | no | | [default\_tags](#input\_default\_tags) | A map of default tags to apply to all taggable resources within the component | `map(string)` | `{}` | no | | [deploy\_mock\_clients](#input\_deploy\_mock\_clients) | Flag to deploy mock webhook lambda for integration testing (test/dev environments only) | `bool` | `false` | no | +| [deploy\_perf\_runner](#input\_deploy\_perf\_runner) | Flag to deploy the perf-runner lambda for performance testing (test/dev environments only) | `bool` | `false` | no | +| [elasticache\_data\_storage\_maximum\_gb](#input\_elasticache\_data\_storage\_maximum\_gb) | Maximum data storage 
in GB for the ElastiCache Serverless delivery state cache | `number` | `1` | no | | [enable\_event\_anomaly\_detection](#input\_enable\_event\_anomaly\_detection) | Enable CloudWatch anomaly detection alarm for inbound event queue message reception | `bool` | `true` | no | | [enable\_xray\_tracing](#input\_enable\_xray\_tracing) | Enable AWS X-Ray active tracing for Lambda functions | `bool` | `false` | no | | [environment](#input\_environment) | The name of the tfscaffold environment | `string` | n/a | yes | @@ -30,6 +34,7 @@ | [log\_level](#input\_log\_level) | The log level to be used in lambda functions within the component. Any log with a lower severity than the configured value will not be logged: https://docs.python.org/3/library/logging.html#levels | `string` | `"INFO"` | no | | [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | The retention period in days for the Cloudwatch Logs events to be retained, default of 0 is indefinite | `number` | `0` | no | | [message\_root\_uri](#input\_message\_root\_uri) | The root URI used for constructing message links in callback payloads | `string` | n/a | yes | +| [mtls\_cert\_secret\_arn](#input\_mtls\_cert\_secret\_arn) | Secrets Manager ARN for the shared mTLS client certificate (production) | `string` | `""` | no | | [parent\_acct\_environment](#input\_parent\_acct\_environment) | Name of the environment responsible for the acct resources used, affects things like DNS zone. Useful for named dev environments | `string` | `"main"` | no | | [pipe\_event\_patterns](#input\_pipe\_event\_patterns) | value | `list(string)` | `[]` | no | | [pipe\_log\_level](#input\_pipe\_log\_level) | Log level for the EventBridge Pipe. 
| `string` | `"ERROR"` | no | @@ -45,10 +50,12 @@ | Name | Source | Version | |------|--------|---------| | [client\_config\_bucket](#module\_client\_config\_bucket) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-s3bucket.zip | n/a | -| [client\_destination](#module\_client\_destination) | ../../modules/client-destination | n/a | +| [client\_delivery](#module\_client\_delivery) | ../../modules/client-delivery | n/a | | [client\_transform\_filter\_lambda](#module\_client\_transform\_filter\_lambda) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-lambda.zip | n/a | | [kms](#module\_kms) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-kms.zip | n/a | | [mock\_webhook\_lambda](#module\_mock\_webhook\_lambda) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-lambda.zip | n/a | +| [mtls\_test\_certs\_bucket](#module\_mtls\_test\_certs\_bucket) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-s3bucket.zip | n/a | +| [perf\_runner\_lambda](#module\_perf\_runner\_lambda) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-lambda.zip | n/a | | [sqs\_inbound\_event](#module\_sqs\_inbound\_event) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-sqs.zip | n/a | ## Outputs diff --git a/infrastructure/terraform/components/callbacks/cloudwatch_eventbus_main.tf b/infrastructure/terraform/components/callbacks/cloudwatch_eventbus_main.tf index f0ce95c4..2a6e687f 100644 --- a/infrastructure/terraform/components/callbacks/cloudwatch_eventbus_main.tf +++ b/infrastructure/terraform/components/callbacks/cloudwatch_eventbus_main.tf @@ -2,3 +2,9 @@ resource "aws_cloudwatch_event_bus" "main" { name = local.csi kms_key_identifier = module.kms.key_arn } + +resource "aws_cloudwatch_event_archive" 
"main" { + name = "${local.csi}-archive" + event_source_arn = aws_cloudwatch_event_bus.main.arn + retention_days = 7 +} diff --git a/infrastructure/terraform/components/callbacks/cloudwatch_metric_alarm_dlq_depth.tf b/infrastructure/terraform/components/callbacks/cloudwatch_metric_alarm_dlq_depth.tf deleted file mode 100644 index e6ed2d9d..00000000 --- a/infrastructure/terraform/components/callbacks/cloudwatch_metric_alarm_dlq_depth.tf +++ /dev/null @@ -1,31 +0,0 @@ -resource "aws_cloudwatch_metric_alarm" "client_dlq_depth" { - for_each = toset(keys(local.config_targets)) - - alarm_name = "${local.csi}-${each.key}-dlq-depth" - alarm_description = join(" ", [ - "RELIABILITY: Messages are in DLQ for ${each.key}.", - "Failed callback deliveries require operator attention.", - ]) - - comparison_operator = "GreaterThanThreshold" - evaluation_periods = 1 - metric_name = "ApproximateNumberOfMessagesVisible" - namespace = "AWS/SQS" - period = 300 - statistic = "Sum" - threshold = 0 - actions_enabled = true - treat_missing_data = "notBreaching" - - dimensions = { - QueueName = "${local.csi}-${each.key}-dlq-queue" - } - - tags = merge( - local.default_tags, - { - Name = "${local.csi}-${each.key}-dlq-depth" - Client = local.config_targets[each.key].client_id - }, - ) -} diff --git a/infrastructure/terraform/components/callbacks/elasticache_delivery_state.tf b/infrastructure/terraform/components/callbacks/elasticache_delivery_state.tf new file mode 100644 index 00000000..6b5d3da1 --- /dev/null +++ b/infrastructure/terraform/components/callbacks/elasticache_delivery_state.tf @@ -0,0 +1,268 @@ +resource "random_password" "elasticache_default_user" { + length = 32 + special = false +} + +resource "aws_elasticache_user" "delivery_state_default" { + user_id = "${local.csi}-valkey-default" + user_name = "default" + engine = "valkey" + access_string = "off -@all" + + authentication_mode { + type = "password" + passwords = [random_password.elasticache_default_user.result] + } + + tags 
= local.default_tags +} + +resource "aws_elasticache_user" "delivery_state_iam" { + user_id = "${local.csi}-elasticache-user" + user_name = "${local.csi}-elasticache-user" + engine = "valkey" + access_string = "on ~* &* +@all" + + authentication_mode { + type = "iam" + } + + tags = local.default_tags +} + +resource "aws_elasticache_user_group" "delivery_state" { + engine = "valkey" + user_group_id = "${local.csi}-delivery-state" + + user_ids = [ + aws_elasticache_user.delivery_state_default.user_id, + aws_elasticache_user.delivery_state_iam.user_id, + ] + + tags = local.default_tags +} + +resource "aws_elasticache_serverless_cache" "delivery_state" { + name = "${local.csi}-delivery-state" + engine = "valkey" + major_engine_version = "8" + description = "Per-target rate limiting and circuit breaker state for callback delivery" + + snapshot_retention_limit = 0 + + user_group_id = aws_elasticache_user_group.delivery_state.user_group_id + + security_group_ids = [aws_security_group.elasticache_delivery_state.id] + subnet_ids = try(local.acct.private_subnets[local.bc_name], []) + + kms_key_id = module.kms.key_arn + + cache_usage_limits { + data_storage { + maximum = var.elasticache_data_storage_maximum_gb + unit = "GB" + } + + ecpu_per_second { + maximum = 1000 + } + } + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-delivery-state" + Description = "Callback delivery rate limiter and circuit breaker state" + }, + ) +} + +resource "aws_security_group" "elasticache_delivery_state" { + name = "${local.csi}-elasticache-delivery-state" + description = "Security group for ElastiCache delivery state cluster" + vpc_id = local.acct.vpc_ids[local.bc_name] + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-elasticache-delivery-state" + }, + ) +} + +resource "aws_vpc_security_group_ingress_rule" "elasticache_from_lambda" { + security_group_id = aws_security_group.elasticache_delivery_state.id + referenced_security_group_id = 
aws_security_group.https_client_lambda.id + from_port = 6379 + to_port = 6379 + ip_protocol = "tcp" + description = "Allow HTTPS Client Lambda to connect to ElastiCache" + + tags = local.default_tags +} + +resource "aws_security_group" "https_client_lambda" { + name = "${local.csi}-https-client-lambda" + description = "Security group for per-client HTTPS Client Lambda functions" + vpc_id = local.acct.vpc_ids[local.bc_name] + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-https-client-lambda" + }, + ) +} + +resource "aws_vpc_security_group_egress_rule" "lambda_to_elasticache" { + security_group_id = aws_security_group.https_client_lambda.id + referenced_security_group_id = aws_security_group.elasticache_delivery_state.id + from_port = 6379 + to_port = 6379 + ip_protocol = "tcp" + description = "Allow Lambda to connect to ElastiCache" + + tags = local.default_tags +} + +resource "aws_vpc_security_group_egress_rule" "lambda_to_https" { + security_group_id = aws_security_group.https_client_lambda.id + cidr_ipv4 = "0.0.0.0/0" + from_port = 0 + to_port = 65535 + ip_protocol = "tcp" + description = "Allow Lambda outbound TCP for HTTPS webhook delivery (port defined per-client in webhook URL)" + + tags = local.default_tags +} + +resource "aws_cloudwatch_metric_alarm" "elasticache_storage_utilisation" { + alarm_name = "${local.csi}-elasticache-storage-utilisation" + alarm_description = join(" ", [ + "CAPACITY: ElastiCache data storage utilisation exceeds 80%.", + "Review stored data or increase elasticache_data_storage_maximum_gb.", + ]) + + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + threshold = var.elasticache_data_storage_maximum_gb * 0.8 + actions_enabled = true + treat_missing_data = "notBreaching" + + metric_query { + id = "storage_used" + return_data = false + + metric { + metric_name = "BytesUsedForCache" + namespace = "AWS/ElastiCache" + period = 300 + stat = "Maximum" + + dimensions = { + CacheClusterId = 
aws_elasticache_serverless_cache.delivery_state.name + } + } + } + + metric_query { + id = "storage_used_gb" + expression = "storage_used / 1073741824" + label = "Storage Used (GB)" + return_data = true + } + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-elasticache-storage-utilisation" + }, + ) +} + +resource "aws_cloudwatch_metric_alarm" "elasticache_ecpu_utilisation" { + alarm_name = "${local.csi}-elasticache-ecpu-utilisation" + alarm_description = join(" ", [ + "PERFORMANCE: ElastiCache processing units utilisation is high.", + "Consider scaling up or optimising Redis commands.", + ]) + + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 3 + metric_name = "ElastiCacheProcessingUnits" + namespace = "AWS/ElastiCache" + period = 300 + statistic = "Average" + threshold = 80 + actions_enabled = true + treat_missing_data = "notBreaching" + + dimensions = { + CacheClusterId = aws_elasticache_serverless_cache.delivery_state.name + } + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-elasticache-ecpu-utilisation" + }, + ) +} + +resource "aws_cloudwatch_metric_alarm" "elasticache_connections" { + alarm_name = "${local.csi}-elasticache-connections" + alarm_description = join(" ", [ + "RELIABILITY: ElastiCache connection count is high.", + "Review per-client Lambda connection pool sizing.", + ]) + + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + metric_name = "CurrConnections" + namespace = "AWS/ElastiCache" + period = 300 + statistic = "Maximum" + threshold = 500 + actions_enabled = true + treat_missing_data = "notBreaching" + + dimensions = { + CacheClusterId = aws_elasticache_serverless_cache.delivery_state.name + } + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-elasticache-connections" + }, + ) +} + +resource "aws_cloudwatch_metric_alarm" "elasticache_throttled_ops" { + alarm_name = "${local.csi}-elasticache-throttled-ops" + alarm_description = join(" ", [ + "PERFORMANCE: 
ElastiCache throttled operations detected.", + "Increase ECPU limit or reduce request rate.", + ]) + + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 2 + metric_name = "ThrottledCmds" + namespace = "AWS/ElastiCache" + period = 300 + statistic = "Sum" + threshold = 0 + actions_enabled = true + treat_missing_data = "notBreaching" + + dimensions = { + CacheClusterId = aws_elasticache_serverless_cache.delivery_state.name + } + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-elasticache-throttled-ops" + }, + ) +} diff --git a/infrastructure/terraform/components/callbacks/locals.tf b/infrastructure/terraform/components/callbacks/locals.tf index f4707154..68129a5b 100644 --- a/infrastructure/terraform/components/callbacks/locals.tf +++ b/infrastructure/terraform/components/callbacks/locals.tf @@ -1,4 +1,5 @@ locals { + bc_name = "client-callbacks" aws_lambda_functions_dir_path = "../../../../lambdas" log_destination_arn = "arn:aws:firehose:${var.region}:${var.aws_account_id}:deliverystream/nhs-main-obs-splunk-logs-firehose" root_domain_name = "${var.environment}.${local.acct.route53_zone_names["client-callbacks"]}" # e.g. [main|dev|abxy0].smsnudge.[dev|nonprod|prod].nhsnotify.national.nhs.uk @@ -12,6 +13,10 @@ locals { } ]...) + # SPKI hash of the mock webhook server certificate for cert-pinning enrichment. + # Computed via external data source because Terraform cannot SHA-256 hash raw binary (DER) data natively. + mock_server_spki_hash = var.deploy_mock_clients ? data.external.mock_server_spki_hash[0].result.hash : "" + # When deploying mock clients, replace sentinel placeholder values with the mock webhook URL and API key. # Only used for S3 object content — must not be used as a for_each source (contains apply-time values). enriched_mock_config_clients = var.deploy_mock_clients ? 
{ @@ -20,47 +25,46 @@ locals { targets = [ for target in try(client.targets, []) : merge(target, { - invocationEndpoint = "${aws_lambda_function_url.mock_webhook[0].function_url}${target.targetId}" + invocationEndpoint = "https://${aws_lb.mock_webhook_mtls[0].dns_name}/${target.targetId}" apiKey = merge(target.apiKey, { headerValue = random_password.mock_webhook_api_key[0].result }) + delivery = merge(try(target.delivery, {}), { + mtls = merge(try(target.delivery.mtls, {}), { + certPinning = merge(try(target.delivery.mtls.certPinning, {}), try(target.delivery.mtls.certPinning.enabled, false) ? { + spkiHash = local.mock_server_spki_hash + } : {}) + }) + }) }) ] }) } : local.config_clients - config_targets = merge([ - for client_id, data in local.config_clients : { - for target in try(data.targets, []) : target.targetId => { - client_id = client_id - target_id = target.targetId - invocation_endpoint = var.deploy_mock_clients ? "${aws_lambda_function_url.mock_webhook[0].function_url}${target.targetId}" : target.invocationEndpoint - invocation_rate_limit_per_second = target.invocationRateLimit - http_method = target.invocationMethod - header_name = target.apiKey.headerName - header_value = var.deploy_mock_clients ? random_password.mock_webhook_api_key[0].result : target.apiKey.headerValue - } - } - ]...) - - config_subscriptions = merge([ - for client_id, data in local.config_clients : { - for subscription in try(data.subscriptions, []) : subscription.subscriptionId => { - client_id = client_id + client_subscriptions = { + for client_id, data in local.config_clients : + client_id => { + for subscription in try(data.subscriptions, []) : + subscription.subscriptionId => { subscription_id = subscription.subscriptionId target_ids = try(subscription.targetIds, []) } } - ]...) 
+ } - subscription_targets = merge([ - for subscription_id, subscription in local.config_subscriptions : { - for target_id in subscription.target_ids : - "${subscription_id}-${target_id}" => { - subscription_id = subscription_id - target_id = target_id + client_subscription_targets = { + for client_id, data in local.config_clients : + client_id => merge([ + for subscription in try(data.subscriptions, []) : { + for target_id in try(subscription.targetIds, []) : + "${subscription.subscriptionId}-${target_id}" => { + subscription_id = subscription.subscriptionId + target_id = target_id + } } - } - ]...) + ]...) + } applications_map_parameter_name = coalesce(var.applications_map_parameter_name, "/${var.project}/${var.environment}/${var.component}/applications-map") + + client_config_bucket_arn = "arn:aws:s3:::${var.project}-${var.aws_account_id}-${var.region}-${var.environment}-${var.component}-subscription-config" } diff --git a/infrastructure/terraform/components/callbacks/module_client_delivery.tf b/infrastructure/terraform/components/callbacks/module_client_delivery.tf new file mode 100644 index 00000000..ebc2e9e1 --- /dev/null +++ b/infrastructure/terraform/components/callbacks/module_client_delivery.tf @@ -0,0 +1,46 @@ +module "client_delivery" { + source = "../../modules/client-delivery" + for_each = local.config_clients + + project = var.project + aws_account_id = var.aws_account_id + region = var.region + component = var.component + environment = var.environment + group = var.group + + client_id = each.key + client_bus_name = aws_cloudwatch_event_bus.main.name + kms_key_arn = module.kms.key_arn + + subscriptions = local.client_subscriptions[each.key] + subscription_targets = local.client_subscription_targets[each.key] + + client_config_bucket = module.client_config_bucket.bucket + client_config_bucket_arn = module.client_config_bucket.arn + + applications_map_parameter_name = local.applications_map_parameter_name + + lambda_s3_bucket = 
local.acct.s3_buckets["lambda_function_artefacts"]["id"] + lambda_code_base_path = local.aws_lambda_functions_dir_path + + force_lambda_code_deploy = var.force_lambda_code_deploy + log_level = var.log_level + log_retention_in_days = var.log_retention_in_days + enable_xray_tracing = var.enable_xray_tracing + + log_destination_arn = local.log_destination_arn + log_subscription_role_arn = local.acct.log_subscription_role_arn + + elasticache_endpoint = aws_elasticache_serverless_cache.delivery_state.endpoint[0].address + elasticache_cache_name = aws_elasticache_serverless_cache.delivery_state.name + elasticache_iam_username = "${var.project}-${var.environment}-${var.component}-elasticache-user" + + mtls_cert_secret_arn = var.mtls_cert_secret_arn + mtls_test_cert_s3_bucket = var.deploy_mock_clients ? module.mtls_test_certs_bucket[0].bucket : "" + mtls_test_cert_s3_key = local.mtls_test_cert_s3_key # gitleaks:allow + mtls_test_ca_s3_key = local.mtls_test_ca_s3_key # gitleaks:allow + + vpc_subnet_ids = try(local.acct.private_subnets[local.bc_name], []) + lambda_security_group_id = aws_security_group.https_client_lambda.id +} diff --git a/infrastructure/terraform/components/callbacks/module_client_destination.tf b/infrastructure/terraform/components/callbacks/module_client_destination.tf deleted file mode 100644 index 21800e94..00000000 --- a/infrastructure/terraform/components/callbacks/module_client_destination.tf +++ /dev/null @@ -1,17 +0,0 @@ -module "client_destination" { - source = "../../modules/client-destination" - - project = var.project - aws_account_id = var.aws_account_id - region = var.region - component = var.component - environment = var.environment - client_bus_name = aws_cloudwatch_event_bus.main.name - - kms_key_arn = module.kms.key_arn - - targets = local.config_targets - subscriptions = local.config_subscriptions - subscription_targets = local.subscription_targets - -} diff --git 
a/infrastructure/terraform/components/callbacks/module_mock_webhook_alb_mtls.tf b/infrastructure/terraform/components/callbacks/module_mock_webhook_alb_mtls.tf new file mode 100644 index 00000000..eb8b6776 --- /dev/null +++ b/infrastructure/terraform/components/callbacks/module_mock_webhook_alb_mtls.tf @@ -0,0 +1,93 @@ +resource "aws_security_group" "mock_webhook_alb" { + count = var.deploy_mock_clients ? 1 : 0 + name = "${local.csi}-mock-webhook-alb" + description = "Security group for mock webhook ALB mTLS endpoint" + vpc_id = local.acct.vpc_ids[local.bc_name] + + tags = merge( + local.default_tags, + { + Name = "${local.csi}-mock-webhook-alb" + }, + ) +} + +resource "aws_vpc_security_group_ingress_rule" "mock_webhook_alb_https" { + count = var.deploy_mock_clients ? 1 : 0 + security_group_id = aws_security_group.mock_webhook_alb[0].id + referenced_security_group_id = aws_security_group.https_client_lambda.id + from_port = 443 + to_port = 443 + ip_protocol = "tcp" + description = "Allow HTTPS Client Lambda to reach mock webhook (mTLS and non-mTLS)" + tags = local.default_tags +} + +resource "aws_vpc_security_group_egress_rule" "mock_webhook_alb_egress" { + count = var.deploy_mock_clients ? 1 : 0 + security_group_id = aws_security_group.mock_webhook_alb[0].id + ip_protocol = "-1" + cidr_ipv4 = "0.0.0.0/0" + tags = local.default_tags +} + +resource "aws_acm_certificate" "mock_webhook_server" { + count = var.deploy_mock_clients ? 1 : 0 + certificate_body = tls_locally_signed_cert.mock_server[0].cert_pem + private_key = tls_private_key.mock_server[0].private_key_pem + certificate_chain = tls_self_signed_cert.test_ca[0].cert_pem + tags = local.default_tags +} + +resource "aws_lb" "mock_webhook_mtls" { + count = var.deploy_mock_clients ? 
1 : 0 + name = substr("${local.csi}-mock-mtls", 0, 32) + internal = true + load_balancer_type = "application" + security_groups = [aws_security_group.mock_webhook_alb[0].id] + subnets = try(local.acct.private_subnets[local.bc_name], []) + tags = local.default_tags +} + +resource "aws_lb_target_group" "mock_webhook_mtls" { + count = var.deploy_mock_clients ? 1 : 0 + name = substr("${local.csi}-mock-mtls", 0, 32) + target_type = "lambda" + tags = local.default_tags +} + +resource "aws_lambda_permission" "mock_webhook_mtls_alb" { + count = var.deploy_mock_clients ? 1 : 0 + statement_id = "AllowMtlsAlb" + action = "lambda:InvokeFunction" + function_name = module.mock_webhook_lambda[0].function_name + principal = "elasticloadbalancing.amazonaws.com" + source_arn = aws_lb_target_group.mock_webhook_mtls[0].arn +} + +resource "aws_lb_target_group_attachment" "mock_webhook_mtls" { + count = var.deploy_mock_clients ? 1 : 0 + target_group_arn = aws_lb_target_group.mock_webhook_mtls[0].arn + target_id = module.mock_webhook_lambda[0].function_arn + depends_on = [aws_lambda_permission.mock_webhook_mtls_alb] +} + +resource "aws_lb_listener" "mock_webhook_mtls" { + count = var.deploy_mock_clients ? 
1 : 0 + load_balancer_arn = aws_lb.mock_webhook_mtls[0].arn + port = 443 + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-TLS13-1-2-2021-06" + certificate_arn = aws_acm_certificate.mock_webhook_server[0].arn + + mutual_authentication { + mode = "passthrough" + } + + default_action { + type = "forward" + target_group_arn = aws_lb_target_group.mock_webhook_mtls[0].arn + } + + tags = local.default_tags +} diff --git a/infrastructure/terraform/components/callbacks/module_mock_webhook_lambda.tf b/infrastructure/terraform/components/callbacks/module_mock_webhook_lambda.tf index b951351e..467dc1c6 100644 --- a/infrastructure/terraform/components/callbacks/module_mock_webhook_lambda.tf +++ b/infrastructure/terraform/components/callbacks/module_mock_webhook_lambda.tf @@ -64,34 +64,3 @@ data "aws_iam_policy_document" "mock_webhook_lambda" { ] } } - -# Lambda Function URL for mock webhook (test/dev only) -resource "aws_lambda_function_url" "mock_webhook" { - count = var.deploy_mock_clients ? 1 : 0 - function_name = module.mock_webhook_lambda[0].function_name - authorization_type = "NONE" # Public endpoint for testing - - cors { - allow_origins = ["*"] - allow_methods = ["POST"] - allow_headers = ["*"] - max_age = 86400 - } -} - -resource "aws_lambda_permission" "mock_webhook_function_url" { - count = var.deploy_mock_clients ? 1 : 0 - statement_id_prefix = "FunctionURLAllowPublicAccess" - action = "lambda:InvokeFunctionUrl" - function_name = module.mock_webhook_lambda[0].function_name - principal = "*" - function_url_auth_type = "NONE" -} - -resource "aws_lambda_permission" "mock_webhook_function_invoke" { - count = var.deploy_mock_clients ? 
1 : 0 - statement_id_prefix = "FunctionURLAllowInvokeAction" - action = "lambda:InvokeFunction" - function_name = module.mock_webhook_lambda[0].function_name - principal = "*" -} diff --git a/infrastructure/terraform/components/callbacks/module_perf_runner_lambda.tf b/infrastructure/terraform/components/callbacks/module_perf_runner_lambda.tf new file mode 100644 index 00000000..424294a8 --- /dev/null +++ b/infrastructure/terraform/components/callbacks/module_perf_runner_lambda.tf @@ -0,0 +1,102 @@ +module "perf_runner_lambda" { + count = var.deploy_perf_runner ? 1 : 0 + source = "https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-lambda.zip" + + function_name = "perf-runner" + description = "Lambda function that executes performance tests against the client callbacks pipeline from within AWS" + + aws_account_id = var.aws_account_id + component = var.component + environment = var.environment + project = var.project + region = var.region + group = var.group + + log_retention_in_days = var.log_retention_in_days + kms_key_arn = module.kms.key_arn + + iam_policy_document = { + body = data.aws_iam_policy_document.perf_runner_lambda[0].json + } + + function_s3_bucket = local.acct.s3_buckets["lambda_function_artefacts"]["id"] + function_code_base_path = local.aws_lambda_functions_dir_path + function_code_dir = "perf-runner-lambda/dist" + handler_function_name = "handler" + runtime = "nodejs22.x" + memory = 512 + timeout = 900 + + log_level = var.log_level + force_lambda_code_deploy = var.force_lambda_code_deploy + enable_lambda_insights = false + enable_xray_tracing = false + + log_destination_arn = local.log_destination_arn + log_subscription_role_arn = local.acct.log_subscription_role_arn + + lambda_env_vars = { + ENVIRONMENT = var.environment + INBOUND_QUEUE_URL = module.sqs_inbound_event.sqs_queue_url + TRANSFORM_FILTER_LOG_GROUP = module.client_transform_filter_lambda.cloudwatch_log_group_name + DELIVERY_LOG_GROUP_PREFIX = 
"/aws/lambda/${local.csi}-https-client-" + } +} + +data "aws_iam_policy_document" "perf_runner_lambda" { + count = var.deploy_perf_runner ? 1 : 0 + + statement { + sid = "KMSPermissions" + effect = "Allow" + + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey", + ] + + resources = [ + module.kms.key_arn, + ] + } + + statement { + sid = "SQSSendMessage" + effect = "Allow" + + actions = [ + "sqs:SendMessage", + "sqs:SendMessageBatch", + ] + + resources = [ + module.sqs_inbound_event.sqs_queue_arn, + ] + } + + statement { + sid = "CloudWatchLogsInsightsQuery" + effect = "Allow" + + actions = [ + "logs:StartQuery", + "logs:StopQuery", + ] + + resources = [ + "arn:aws:logs:${var.region}:${var.aws_account_id}:log-group:${module.client_transform_filter_lambda.cloudwatch_log_group_name}:*", + "arn:aws:logs:${var.region}:${var.aws_account_id}:log-group:/aws/lambda/${local.csi}-https-client-*", + ] + } + + statement { + sid = "CloudWatchLogsInsightsResults" + effect = "Allow" + + actions = [ + "logs:GetQueryResults", + ] + + resources = ["*"] + } +} diff --git a/infrastructure/terraform/components/callbacks/module_transform_filter_lambda.tf b/infrastructure/terraform/components/callbacks/module_transform_filter_lambda.tf index fb1313f8..2b75ddd5 100644 --- a/infrastructure/terraform/components/callbacks/module_transform_filter_lambda.tf +++ b/infrastructure/terraform/components/callbacks/module_transform_filter_lambda.tf @@ -42,7 +42,6 @@ module "client_transform_filter_lambda" { CLIENT_SUBSCRIPTION_CONFIG_PREFIX = "client_subscriptions/" CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS = "60" MESSAGE_ROOT_URI = var.message_root_uri - APPLICATIONS_MAP_PARAMETER = local.applications_map_parameter_name } } @@ -70,7 +69,7 @@ data "aws_iam_policy_document" "client_transform_filter_lambda" { ] resources = [ - module.client_config_bucket.arn, + local.client_config_bucket_arn, ] } @@ -83,20 +82,7 @@ data "aws_iam_policy_document" "client_transform_filter_lambda" { ] resources = [ - 
"${module.client_config_bucket.arn}/*", - ] - } - - statement { - sid = "SSMApplicationsMapRead" - effect = "Allow" - - actions = [ - "ssm:GetParameter", - ] - - resources = [ - "arn:aws:ssm:${var.region}:${var.aws_account_id}:parameter${local.applications_map_parameter_name}", + "${local.client_config_bucket_arn}/*", ] } diff --git a/infrastructure/terraform/components/callbacks/pipes_pipe_main.tf b/infrastructure/terraform/components/callbacks/pipes_pipe_main.tf index 3fddfcca..ae914f4f 100644 --- a/infrastructure/terraform/components/callbacks/pipes_pipe_main.tf +++ b/infrastructure/terraform/components/callbacks/pipes_pipe_main.tf @@ -26,8 +26,7 @@ resource "aws_pipes_pipe" "main" { input_template = <, - "subscriptions": <$.subscriptions>, - "signatures": <$.signatures> + "subscriptions": <$.subscriptions> } EOF } diff --git a/infrastructure/terraform/components/callbacks/pre.sh b/infrastructure/terraform/components/callbacks/pre.sh index aa5d3dda..cac3b745 100755 --- a/infrastructure/terraform/components/callbacks/pre.sh +++ b/infrastructure/terraform/components/callbacks/pre.sh @@ -6,17 +6,21 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" # shellcheck source=_paths.sh source "${script_dir}/_paths.sh" -# Resolve deploy_mock_clients from tfvars; base_path/group/region/environment are in scope from terraform.sh +# Resolve deploy_mock_clients and deploy_perf_runner from tfvars; base_path/group/region/environment are in scope from terraform.sh deploy_mock_clients="false" +deploy_perf_runner="false" for _tfvar_file in \ "${base_path}/etc/group_${group}.tfvars" \ "${base_path}/etc/env_${region}_${environment}.tfvars"; do if [ -f "${_tfvar_file}" ]; then _val=$(grep -E '^\s*deploy_mock_clients\s*=' "${_tfvar_file}" | tail -1 | sed 's/.*=\s*//;s/\s*$//') [ -n "${_val}" ] && deploy_mock_clients="${_val}" + _val=$(grep -E '^\s*deploy_perf_runner\s*=' "${_tfvar_file}" | tail -1 | sed 's/.*=\s*//;s/\s*$//') + [ -n "${_val}" ] && deploy_perf_runner="${_val}" 
fi done echo "deploy_mock_clients resolved to: ${deploy_mock_clients}" +echo "deploy_perf_runner resolved to: ${deploy_perf_runner}" pnpm install --frozen-lockfile @@ -25,15 +29,13 @@ pnpm run generate-dependencies "${script_dir}/sync-client-config.sh" if [ "${deploy_mock_clients}" == "true" ]; then - shopt -s nullglob - existing_configs=("${clients_dir}"/*.json) - shopt -u nullglob - if [ "${#existing_configs[@]}" -eq 0 ]; then - cp "${bounded_context_root}/tests/integration/fixtures/subscriptions/"*.json "${clients_dir}/" - echo "Copied mock client subscription config fixtures into clients dir" - else - echo "Client configs already present from S3 sync; skipping fixture copy" - fi + cp "${bounded_context_root}/tests/integration/fixtures/subscriptions/"*.json "${clients_dir}/" + echo "Copied mock client subscription config fixtures into clients dir" +fi + +if [ "${deploy_perf_runner}" == "true" ]; then + cp "${bounded_context_root}/tests/performance/fixtures/subscriptions/"*.json "${clients_dir}/" + echo "Copied perf client subscription config fixtures into clients dir" fi pnpm run --recursive --if-present lambda-build diff --git a/infrastructure/terraform/components/callbacks/s3_bucket_client_config.tf b/infrastructure/terraform/components/callbacks/s3_bucket_client_config.tf index 8bf25c83..9943affd 100644 --- a/infrastructure/terraform/components/callbacks/s3_bucket_client_config.tf +++ b/infrastructure/terraform/components/callbacks/s3_bucket_client_config.tf @@ -55,7 +55,7 @@ data "aws_iam_policy_document" "client_config_bucket" { ] resources = [ - module.client_config_bucket.arn, + local.client_config_bucket_arn, ] } @@ -73,7 +73,7 @@ data "aws_iam_policy_document" "client_config_bucket" { ] resources = [ - "${module.client_config_bucket.arn}/*", + "${local.client_config_bucket_arn}/*", ] } @@ -91,8 +91,8 @@ data "aws_iam_policy_document" "client_config_bucket" { ] resources = [ - module.client_config_bucket.arn, - "${module.client_config_bucket.arn}/*" + 
local.client_config_bucket_arn, + "${local.client_config_bucket_arn}/*" ] condition { diff --git a/infrastructure/terraform/components/callbacks/s3_bucket_mtls_test_certs.tf b/infrastructure/terraform/components/callbacks/s3_bucket_mtls_test_certs.tf new file mode 100644 index 00000000..e1bd377e --- /dev/null +++ b/infrastructure/terraform/components/callbacks/s3_bucket_mtls_test_certs.tf @@ -0,0 +1,193 @@ +module "mtls_test_certs_bucket" { + count = var.deploy_mock_clients ? 1 : 0 + source = "https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-s3bucket.zip" + + name = "mtls-test-certs" + + aws_account_id = var.aws_account_id + component = var.component + environment = var.environment + project = var.project + region = var.region + + default_tags = merge( + local.default_tags, + { + Description = "mTLS test certificate material for non-production callback delivery" + } + ) + + kms_key_arn = module.kms.key_arn + force_destroy = var.s3_enable_force_destroy + versioning = false + object_ownership = "BucketOwnerPreferred" + bucket_key_enabled = true + + policy_documents = [ + data.aws_iam_policy_document.mtls_test_certs_bucket[0].json + ] +} + +data "aws_iam_policy_document" "mtls_test_certs_bucket" { + count = var.deploy_mock_clients ? 
1 : 0 + + statement { + sid = "DenyInsecureTransport" + effect = "Deny" + + principals { + type = "*" + identifiers = ["*"] + } + + actions = [ + "s3:*", + ] + + resources = [ + "arn:aws:s3:::${var.project}-${var.aws_account_id}-${var.region}-${var.environment}-${var.component}-mtls-test-certs", + "arn:aws:s3:::${var.project}-${var.aws_account_id}-${var.region}-${var.environment}-${var.component}-mtls-test-certs/*" + ] + + condition { + test = "Bool" + variable = "aws:SecureTransport" + values = ["false"] + } + } +} + +locals { + mtls_test_certs_s3_prefix = "callbacks/mtls-test" + mtls_test_cert_s3_key = "${local.mtls_test_certs_s3_prefix}/client-bundle.pem" + mtls_test_ca_s3_key = "${local.mtls_test_certs_s3_prefix}/ca.pem" +} + +# --- TLS provider: generate test CA, client, and server certificates --- + +resource "tls_private_key" "test_ca" { + count = var.deploy_mock_clients ? 1 : 0 + algorithm = "ECDSA" + ecdsa_curve = "P256" +} + +resource "tls_self_signed_cert" "test_ca" { + count = var.deploy_mock_clients ? 1 : 0 + private_key_pem = tls_private_key.test_ca[0].private_key_pem + is_ca_certificate = true + validity_period_hours = 87600 + + subject { + common_name = "NHS Notify Test CA" + organization = "NHS Notify" + country = "GB" + } + + allowed_uses = [ + "cert_signing", + ] +} + +resource "tls_private_key" "test_client" { + count = var.deploy_mock_clients ? 1 : 0 + algorithm = "ECDSA" + ecdsa_curve = "P256" +} + +resource "tls_cert_request" "test_client" { + count = var.deploy_mock_clients ? 1 : 0 + private_key_pem = tls_private_key.test_client[0].private_key_pem + + subject { + common_name = "NHS Notify Callbacks Test Client" + organization = "NHS Notify" + country = "GB" + } +} + +resource "tls_locally_signed_cert" "test_client" { + count = var.deploy_mock_clients ? 
1 : 0 + cert_request_pem = tls_cert_request.test_client[0].cert_request_pem + ca_private_key_pem = tls_private_key.test_ca[0].private_key_pem + ca_cert_pem = tls_self_signed_cert.test_ca[0].cert_pem + validity_period_hours = 87600 + + allowed_uses = [ + "digital_signature", + "client_auth", + ] +} + +resource "tls_private_key" "mock_server" { + count = var.deploy_mock_clients ? 1 : 0 + algorithm = "ECDSA" + ecdsa_curve = "P256" +} + +resource "tls_cert_request" "mock_server" { + count = var.deploy_mock_clients ? 1 : 0 + private_key_pem = tls_private_key.mock_server[0].private_key_pem + + subject { + common_name = "NHS Notify Mock Webhook Server" + organization = "NHS Notify" + country = "GB" + } + + dns_names = ["*.eu-west-2.elb.amazonaws.com"] +} + +resource "tls_locally_signed_cert" "mock_server" { + count = var.deploy_mock_clients ? 1 : 0 + cert_request_pem = tls_cert_request.mock_server[0].cert_request_pem + ca_private_key_pem = tls_private_key.test_ca[0].private_key_pem + ca_cert_pem = tls_self_signed_cert.test_ca[0].cert_pem + validity_period_hours = 87600 + + allowed_uses = [ + "digital_signature", + "key_encipherment", + "server_auth", + ] +} + +# --- S3 objects: Lambda reads certs from S3 at runtime --- + +resource "aws_s3_object" "mtls_test_client_bundle" { + count = var.deploy_mock_clients ? 1 : 0 + bucket = module.mtls_test_certs_bucket[0].id + key = local.mtls_test_cert_s3_key # gitleaks:allow + content = "${tls_locally_signed_cert.test_client[0].cert_pem}${tls_private_key.test_client[0].private_key_pem}" + + server_side_encryption = "aws:kms" + content_type = "application/x-pem-file" +} + +resource "aws_s3_object" "mtls_test_ca" { + count = var.deploy_mock_clients ? 
1 : 0 + bucket = module.mtls_test_certs_bucket[0].id + key = local.mtls_test_ca_s3_key # gitleaks:allow + content = tls_self_signed_cert.test_ca[0].cert_pem + + server_side_encryption = "aws:kms" + content_type = "application/x-pem-file" +} + +# Compute the base64-encoded SHA-256 hash of the mock server's SPKI (Subject Public Key Info) DER. +# Used by cert-pinning clients to verify the server certificate during mTLS handshake. +data "external" "mock_server_spki_hash" { + count = var.deploy_mock_clients ? 1 : 0 + program = ["bash", "-c", <<-EOT + HASH=$(jq -r '.pem' \ + | openssl pkey -pubin -outform DER 2>/dev/null \ + | openssl dgst -sha256 -binary \ + | base64 \ + | tr -d '\n') + printf '{"hash":"%s"}' "$HASH" + EOT + ] + + query = { + pem = tls_private_key.mock_server[0].public_key_pem + } +} diff --git a/infrastructure/terraform/components/callbacks/variables.tf b/infrastructure/terraform/components/callbacks/variables.tf index 74a72d24..9c71492d 100644 --- a/infrastructure/terraform/components/callbacks/variables.tf +++ b/infrastructure/terraform/components/callbacks/variables.tf @@ -155,6 +155,12 @@ variable "deploy_mock_clients" { default = false } +variable "deploy_perf_runner" { + type = bool + description = "Flag to deploy the perf-runner lambda for performance testing (test/dev environments only)" + default = false +} + variable "enable_xray_tracing" { type = bool description = "Enable AWS X-Ray active tracing for Lambda functions" @@ -177,3 +183,15 @@ variable "s3_enable_force_destroy" { description = "Whether to enable force destroy for the S3 buckets created in this module" default = false } + +variable "mtls_cert_secret_arn" { + type = string + description = "Secrets Manager ARN for the shared mTLS client certificate (production)" + default = "" +} + +variable "elasticache_data_storage_maximum_gb" { + type = number + description = "Maximum data storage in GB for the ElastiCache Serverless delivery state cache" + default = 1 +} diff --git 
a/infrastructure/terraform/components/callbacks/versions.tf b/infrastructure/terraform/components/callbacks/versions.tf index 55552749..d91998a2 100644 --- a/infrastructure/terraform/components/callbacks/versions.tf +++ b/infrastructure/terraform/components/callbacks/versions.tf @@ -4,10 +4,18 @@ terraform { source = "hashicorp/aws" version = "6.13" } + external = { + source = "hashicorp/external" + version = "~> 2.0" + } random = { source = "hashicorp/random" version = "~> 3.0" } + tls = { + source = "hashicorp/tls" + version = "~> 4.0" + } } required_version = ">= 1.10.1" diff --git a/infrastructure/terraform/modules/client-delivery/README.md b/infrastructure/terraform/modules/client-delivery/README.md new file mode 100644 index 00000000..0a4965e7 --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/README.md @@ -0,0 +1,61 @@ + + + + +## Requirements + +No requirements. +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [applications\_map\_parameter\_name](#input\_applications\_map\_parameter\_name) | SSM Parameter Store path for the clientId-to-applicationData map | `string` | n/a | yes | +| [aws\_account\_id](#input\_aws\_account\_id) | Account ID | `string` | n/a | yes | +| [client\_bus\_name](#input\_client\_bus\_name) | EventBridge bus name for subscription rules | `string` | n/a | yes | +| [client\_config\_bucket](#input\_client\_config\_bucket) | S3 bucket name containing client subscription configuration | `string` | n/a | yes | +| [client\_config\_bucket\_arn](#input\_client\_config\_bucket\_arn) | S3 bucket ARN containing client subscription configuration | `string` | n/a | yes | +| [client\_id](#input\_client\_id) | Unique identifier for this client | `string` | n/a | yes | +| [component](#input\_component) | Component name | `string` | n/a | yes | +| [elasticache\_cache\_name](#input\_elasticache\_cache\_name) | ElastiCache cache name for SigV4 token presigning | 
`string` | `""` | no | +| [elasticache\_endpoint](#input\_elasticache\_endpoint) | ElastiCache Serverless endpoint URL | `string` | `""` | no | +| [elasticache\_iam\_username](#input\_elasticache\_iam\_username) | IAM username for ElastiCache authentication | `string` | `""` | no | +| [enable\_xray\_tracing](#input\_enable\_xray\_tracing) | Enable AWS X-Ray active tracing for the Lambda function | `bool` | `false` | no | +| [environment](#input\_environment) | The name of the tfscaffold environment | `string` | n/a | yes | +| [force\_lambda\_code\_deploy](#input\_force\_lambda\_code\_deploy) | Force Lambda code redeployment even when commit tag matches | `bool` | `false` | no | +| [group](#input\_group) | The name of the tfscaffold group | `string` | `null` | no | +| [kms\_key\_arn](#input\_kms\_key\_arn) | KMS Key ARN for encryption at rest | `string` | n/a | yes | +| [lambda\_batch\_size](#input\_lambda\_batch\_size) | Number of SQS messages per Lambda invocation | `number` | `10` | no | +| [lambda\_code\_base\_path](#input\_lambda\_code\_base\_path) | Base path to Lambda source code directories | `string` | n/a | yes | +| [lambda\_memory](#input\_lambda\_memory) | Lambda memory allocation in MB | `number` | `256` | no | +| [lambda\_s3\_bucket](#input\_lambda\_s3\_bucket) | S3 bucket for Lambda function artefacts | `string` | n/a | yes | +| [lambda\_security\_group\_id](#input\_lambda\_security\_group\_id) | Security group ID for the Lambda function | `string` | `""` | no | +| [lambda\_timeout](#input\_lambda\_timeout) | Lambda timeout in seconds | `number` | `30` | no | +| [log\_destination\_arn](#input\_log\_destination\_arn) | Firehose destination ARN for log forwarding | `string` | `""` | no | +| [log\_level](#input\_log\_level) | Log level for the Lambda function | `string` | `"INFO"` | no | +| [log\_retention\_in\_days](#input\_log\_retention\_in\_days) | CloudWatch log retention period in days | `number` | `0` | no | +| 
[log\_subscription\_role\_arn](#input\_log\_subscription\_role\_arn) | IAM role ARN for CloudWatch log subscription | `string` | `""` | no | +| [max\_retry\_duration\_seconds](#input\_max\_retry\_duration\_seconds) | Maximum retry window before messages are sent to DLQ | `number` | `7200` | no | +| [mtls\_cert\_secret\_arn](#input\_mtls\_cert\_secret\_arn) | Secrets Manager ARN for the mTLS client certificate | `string` | `""` | no | +| [mtls\_test\_ca\_s3\_key](#input\_mtls\_test\_ca\_s3\_key) | S3 key for dev CA certificate PEM bundle used for server verification | `string` | `""` | no | +| [mtls\_test\_cert\_s3\_bucket](#input\_mtls\_test\_cert\_s3\_bucket) | S3 bucket for dev mTLS test certificates | `string` | `""` | no | +| [mtls\_test\_cert\_s3\_key](#input\_mtls\_test\_cert\_s3\_key) | S3 key for dev mTLS test certificate bundle | `string` | `""` | no | +| [project](#input\_project) | The name of the tfscaffold project | `string` | n/a | yes | +| [region](#input\_region) | AWS Region | `string` | n/a | yes | +| [sqs\_max\_receive\_count](#input\_sqs\_max\_receive\_count) | Safety-net maximum receive count before a message moves to DLQ. Supplements the time-based retry window for cases where the Lambda fails before reaching the window check. | `number` | `100` | no | +| [sqs\_visibility\_timeout\_seconds](#input\_sqs\_visibility\_timeout\_seconds) | Visibility timeout for the per-client delivery queue | `number` | `60` | no | +| [subscription\_targets](#input\_subscription\_targets) | Flattened subscription-target fanout map keyed by subscription-target composite key |
map(object({
subscription_id = string
target_id = string
}))
| n/a | yes | +| [subscriptions](#input\_subscriptions) | Subscription definitions for this client, keyed by subscription\_id |
map(object({
subscription_id = string
target_ids = list(string)
}))
| n/a | yes | +| [vpc\_subnet\_ids](#input\_vpc\_subnet\_ids) | VPC subnet IDs for Lambda execution | `list(string)` | `[]` | no | +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [dlq\_delivery](#module\_dlq\_delivery) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-sqs.zip | n/a | +| [https\_client\_lambda](#module\_https\_client\_lambda) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-lambda.zip | n/a | +| [sqs\_delivery](#module\_sqs\_delivery) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.9/terraform-sqs.zip | n/a | +## Outputs + +No outputs. + + + diff --git a/infrastructure/terraform/modules/client-delivery/cloudwatch_event_rule_per_subscription.tf b/infrastructure/terraform/modules/client-delivery/cloudwatch_event_rule_per_subscription.tf new file mode 100644 index 00000000..63da2089 --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/cloudwatch_event_rule_per_subscription.tf @@ -0,0 +1,103 @@ +resource "aws_cloudwatch_event_rule" "per_subscription" { + for_each = var.subscriptions + + name = "${local.csi}-${each.key}" + description = "Client Callbacks event rule for client ${var.client_id} subscription ${each.key}" + event_bus_name = var.client_bus_name + + event_pattern = jsonencode({ + "detail" : { + "subscriptions" : [each.value.subscription_id] + } + }) + + tags = merge(local.default_tags, { + SubscriptionId = each.value.subscription_id + }) +} + +resource "aws_cloudwatch_event_target" "per_subscription_target" { + for_each = var.subscription_targets + + rule = aws_cloudwatch_event_rule.per_subscription[each.value.subscription_id].name + arn = module.sqs_delivery.sqs_queue_arn + target_id = "${local.csi}-${each.value.target_id}" + event_bus_name = var.client_bus_name + role_arn = aws_iam_role.eventbridge_sqs_target.arn + + sqs_target { + message_group_id = null + } + + input_transformer { + 
input_paths = { + payload = "$.detail.payload" + } + + input_template = "{\"payload\": , \"subscriptionId\": \"${each.value.subscription_id}\", \"targetId\": \"${each.value.target_id}\"}" + } + + dead_letter_config { + arn = module.dlq_delivery.sqs_queue_arn + } + + retry_policy { + maximum_retry_attempts = 0 + maximum_event_age_in_seconds = 60 + } +} + +resource "aws_iam_role" "eventbridge_sqs_target" { + name = "${local.client_prefix}-eb-sqs-role" + description = "Role for EventBridge to send messages to per-client SQS queue" + assume_role_policy = data.aws_iam_policy_document.eventbridge_sqs_assume.json + + tags = local.default_tags +} + +data "aws_iam_policy_document" "eventbridge_sqs_assume" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["events.amazonaws.com"] + } + } +} + +resource "aws_iam_role_policy" "eventbridge_sqs_send" { + name = "sqs-send" + role = aws_iam_role.eventbridge_sqs_target.id + policy = data.aws_iam_policy_document.eventbridge_sqs_send.json +} + +data "aws_iam_policy_document" "eventbridge_sqs_send" { + statement { + sid = "AllowSQSSendMessage" + effect = "Allow" + + actions = [ + "sqs:SendMessage", + ] + + resources = [ + module.sqs_delivery.sqs_queue_arn, + module.dlq_delivery.sqs_queue_arn, + ] + } + + statement { + sid = "AllowKMSForSQS" + effect = "Allow" + + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey", + ] + + resources = [ + var.kms_key_arn, + ] + } +} diff --git a/infrastructure/terraform/modules/client-delivery/iam_role_sqs_target.tf b/infrastructure/terraform/modules/client-delivery/iam_role_sqs_target.tf new file mode 100644 index 00000000..55162684 --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/iam_role_sqs_target.tf @@ -0,0 +1,151 @@ +data "aws_iam_policy_document" "https_client_lambda" { + statement { + sid = "KMSPermissions" + effect = "Allow" + + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey", + ] + + resources = [ + var.kms_key_arn, + 
] + } + + statement { + sid = "SQSDeliveryQueueConsume" + effect = "Allow" + + actions = [ + "sqs:ReceiveMessage", + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:ChangeMessageVisibility", + ] + + resources = [ + module.sqs_delivery.sqs_queue_arn, + ] + } + + statement { + sid = "SQSDLQSend" + effect = "Allow" + + actions = [ + "sqs:SendMessage", + ] + + resources = [ + module.dlq_delivery.sqs_queue_arn, + ] + } + + statement { + sid = "SSMGetApplicationsMap" + effect = "Allow" + + actions = [ + "ssm:GetParameter", + ] + + resources = [ + "arn:aws:ssm:${var.region}:${var.aws_account_id}:parameter${var.applications_map_parameter_name}", + ] + } + + statement { + sid = "S3ClientConfigReadAccess" + effect = "Allow" + + actions = [ + "s3:GetObject", + ] + + resources = [ + "${var.client_config_bucket_arn}/client_subscriptions/*", + ] + } + + statement { + sid = "S3ClientConfigListAccess" + effect = "Allow" + + actions = [ + "s3:ListBucket", + ] + + resources = [ + var.client_config_bucket_arn, + ] + } + + dynamic "statement" { + for_each = var.lambda_security_group_id != "" ? [1] : [] + content { + sid = "VPCNetworkInterfacePermissions" + effect = "Allow" + + actions = [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + ] + + resources = [ + "*", + ] + } + } + + dynamic "statement" { + for_each = var.mtls_cert_secret_arn != "" ? [1] : [] + content { + sid = "SecretsManagerMTLSCert" + effect = "Allow" + + actions = [ + "secretsmanager:GetSecretValue", + ] + + resources = [ + var.mtls_cert_secret_arn, + ] + } + } + + dynamic "statement" { + for_each = var.mtls_test_cert_s3_bucket != "" ? 
[1] : [] + content { + sid = "S3MTLSTestCertReadAccess" + effect = "Allow" + + actions = [ + "s3:GetObject", + ] + + resources = [ + "arn:aws:s3:::${var.mtls_test_cert_s3_bucket}/${var.mtls_test_cert_s3_key}", + "arn:aws:s3:::${var.mtls_test_cert_s3_bucket}/${var.mtls_test_ca_s3_key}", + ] + } + } + + dynamic "statement" { + for_each = var.elasticache_endpoint != "" ? [1] : [] + content { + sid = "ElastiCacheConnect" + effect = "Allow" + + actions = [ + "elasticache:Connect", + ] + + resources = [ + "arn:aws:elasticache:${var.region}:${var.aws_account_id}:serverlesscache:${var.elasticache_cache_name}", + "arn:aws:elasticache:${var.region}:${var.aws_account_id}:user:${var.elasticache_iam_username}", + ] + } + } +} diff --git a/infrastructure/terraform/modules/client-delivery/locals.tf b/infrastructure/terraform/modules/client-delivery/locals.tf new file mode 100644 index 00000000..6ca35137 --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/locals.tf @@ -0,0 +1,21 @@ +locals { + csi = replace( + format( + "%s-%s-%s", + var.project, + var.environment, + var.component, + ), + "_", + "", + ) + + client_prefix = "${local.csi}-${var.client_id}" + + default_tags = { + Project = var.project + Environment = var.environment + Component = var.component + Client = var.client_id + } +} diff --git a/infrastructure/terraform/modules/client-delivery/module_dlq_per_client.tf b/infrastructure/terraform/modules/client-delivery/module_dlq_per_client.tf new file mode 100644 index 00000000..84c410dd --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/module_dlq_per_client.tf @@ -0,0 +1,43 @@ +module "dlq_delivery" { + source = "https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-sqs.zip" + + aws_account_id = var.aws_account_id + component = var.component + environment = var.environment + project = var.project + region = var.region + name = "${var.client_id}-delivery-dlq" + + sqs_kms_key_arn = var.kms_key_arn + + 
create_dlq = false +} + +resource "aws_cloudwatch_metric_alarm" "dlq_depth" { + alarm_name = "${local.client_prefix}-dlq-depth" + alarm_description = join(" ", [ + "RELIABILITY: Messages are in DLQ for client ${var.client_id}.", + "Failed callback deliveries require operator attention.", + ]) + + comparison_operator = "GreaterThanThreshold" + evaluation_periods = 1 + metric_name = "ApproximateNumberOfMessagesVisible" + namespace = "AWS/SQS" + period = 300 + statistic = "Sum" + threshold = 0 + actions_enabled = true + treat_missing_data = "notBreaching" + + dimensions = { + QueueName = "${local.client_prefix}-delivery-dlq-queue" + } + + tags = merge( + local.default_tags, + { + Name = "${local.client_prefix}-dlq-depth" + }, + ) +} diff --git a/infrastructure/terraform/modules/client-delivery/module_https_client_lambda.tf b/infrastructure/terraform/modules/client-delivery/module_https_client_lambda.tf new file mode 100644 index 00000000..1260d471 --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/module_https_client_lambda.tf @@ -0,0 +1,71 @@ +module "https_client_lambda" { + source = "https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-lambda.zip" + + function_name = "https-client-${var.client_id}" + description = "HTTPS delivery Lambda for client ${var.client_id}" + + aws_account_id = var.aws_account_id + component = var.component + environment = var.environment + project = var.project + region = var.region + group = var.group + + log_retention_in_days = var.log_retention_in_days + kms_key_arn = var.kms_key_arn + + iam_policy_document = { + body = data.aws_iam_policy_document.https_client_lambda.json + } + + function_s3_bucket = var.lambda_s3_bucket + function_code_base_path = var.lambda_code_base_path + function_code_dir = "https-client-lambda/dist" + function_include_common = true + handler_function_name = "handler" + runtime = "nodejs22.x" + memory = var.lambda_memory + timeout = var.lambda_timeout + 
log_level = var.log_level + + force_lambda_code_deploy = var.force_lambda_code_deploy + enable_lambda_insights = false + enable_xray_tracing = var.enable_xray_tracing + + log_destination_arn = var.log_destination_arn + log_subscription_role_arn = var.log_subscription_role_arn + + lambda_env_vars = { + APPLICATIONS_MAP_PARAMETER = var.applications_map_parameter_name + CLIENT_ID = var.client_id + CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS = "60" + CLIENT_SUBSCRIPTION_CONFIG_BUCKET = var.client_config_bucket + CLIENT_SUBSCRIPTION_CONFIG_PREFIX = "client_subscriptions/" + DLQ_URL = module.dlq_delivery.sqs_queue_url + ELASTICACHE_CACHE_NAME = var.elasticache_cache_name + ELASTICACHE_ENDPOINT = var.elasticache_endpoint + ELASTICACHE_IAM_USERNAME = var.elasticache_iam_username + ENVIRONMENT = var.environment + MAX_RETRY_DURATION_SECONDS = tostring(var.max_retry_duration_seconds) + METRICS_NAMESPACE = "nhs-notify-client-callbacks" + MTLS_CERT_SECRET_ARN = var.mtls_cert_secret_arn + MTLS_TEST_CA_S3_KEY = var.mtls_test_ca_s3_key # gitleaks:allow + MTLS_TEST_CERT_S3_BUCKET = var.mtls_test_cert_s3_bucket + MTLS_TEST_CERT_S3_KEY = var.mtls_test_cert_s3_key # gitleaks:allow + QUEUE_URL = module.sqs_delivery.sqs_queue_url + } + + vpc_config = var.lambda_security_group_id != "" ? 
{ + subnet_ids = var.vpc_subnet_ids + security_group_ids = [var.lambda_security_group_id] + } : null +} + +resource "aws_lambda_event_source_mapping" "sqs_delivery" { + event_source_arn = module.sqs_delivery.sqs_queue_arn + function_name = module.https_client_lambda.function_arn + batch_size = var.lambda_batch_size + enabled = true + + function_response_types = ["ReportBatchItemFailures"] +} diff --git a/infrastructure/terraform/modules/client-delivery/module_sqs_per_client.tf b/infrastructure/terraform/modules/client-delivery/module_sqs_per_client.tf new file mode 100644 index 00000000..0fad559b --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/module_sqs_per_client.tf @@ -0,0 +1,47 @@ +module "sqs_delivery" { + source = "https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.9/terraform-sqs.zip" + + aws_account_id = var.aws_account_id + component = var.component + environment = var.environment + project = var.project + region = var.region + name = "${var.client_id}-delivery" + + sqs_kms_key_arn = var.kms_key_arn + + visibility_timeout_seconds = var.sqs_visibility_timeout_seconds + max_receive_count = var.sqs_max_receive_count + + create_dlq = false + + sqs_policy_overload = data.aws_iam_policy_document.sqs_delivery.json +} + +resource "aws_sqs_queue_redrive_policy" "delivery" { + queue_url = module.sqs_delivery.sqs_queue_url + redrive_policy = jsonencode({ + deadLetterTargetArn = module.dlq_delivery.sqs_queue_arn + maxReceiveCount = var.sqs_max_receive_count + }) +} + +data "aws_iam_policy_document" "sqs_delivery" { + statement { + sid = "AllowEventBridgeToSendMessage" + effect = "Allow" + + principals { + type = "Service" + identifiers = ["events.amazonaws.com"] + } + + actions = [ + "sqs:SendMessage", + ] + + resources = [ + "arn:aws:sqs:${var.region}:${var.aws_account_id}:${local.csi}-${var.client_id}-delivery-queue", + ] + } +} diff --git a/infrastructure/terraform/modules/client-delivery/variables.tf 
b/infrastructure/terraform/modules/client-delivery/variables.tf new file mode 100644 index 00000000..643e163e --- /dev/null +++ b/infrastructure/terraform/modules/client-delivery/variables.tf @@ -0,0 +1,212 @@ +variable "project" { + type = string + description = "The name of the tfscaffold project" +} + +variable "environment" { + type = string + description = "The name of the tfscaffold environment" +} + +variable "component" { + type = string + description = "Component name" +} + +variable "aws_account_id" { + type = string + description = "Account ID" +} + +variable "region" { + type = string + description = "AWS Region" +} + +variable "group" { + type = string + description = "The name of the tfscaffold group" + default = null +} + +variable "client_id" { + type = string + description = "Unique identifier for this client" +} + +variable "kms_key_arn" { + type = string + description = "KMS Key ARN for encryption at rest" +} + +variable "client_bus_name" { + type = string + description = "EventBridge bus name for subscription rules" +} + +variable "subscriptions" { + type = map(object({ + subscription_id = string + target_ids = list(string) + })) + description = "Subscription definitions for this client, keyed by subscription_id" +} + +variable "subscription_targets" { + type = map(object({ + subscription_id = string + target_id = string + })) + description = "Flattened subscription-target fanout map keyed by subscription-target composite key" +} + +variable "client_config_bucket" { + type = string + description = "S3 bucket name containing client subscription configuration" +} + +variable "client_config_bucket_arn" { + type = string + description = "S3 bucket ARN containing client subscription configuration" +} + +variable "applications_map_parameter_name" { + type = string + description = "SSM Parameter Store path for the clientId-to-applicationData map" +} + +variable "lambda_s3_bucket" { + type = string + description = "S3 bucket for Lambda function 
artefacts" +} + +variable "lambda_code_base_path" { + type = string + description = "Base path to Lambda source code directories" +} + +variable "force_lambda_code_deploy" { + type = bool + description = "Force Lambda code redeployment even when commit tag matches" + default = false +} + +variable "log_level" { + type = string + description = "Log level for the Lambda function" + default = "INFO" +} + +variable "log_retention_in_days" { + type = number + description = "CloudWatch log retention period in days" + default = 0 +} + +variable "log_destination_arn" { + type = string + description = "Firehose destination ARN for log forwarding" + default = "" +} + +variable "log_subscription_role_arn" { + type = string + description = "IAM role ARN for CloudWatch log subscription" + default = "" +} + +variable "lambda_batch_size" { + type = number + description = "Number of SQS messages per Lambda invocation" + default = 10 +} + +variable "lambda_memory" { + type = number + description = "Lambda memory allocation in MB" + default = 256 +} + +variable "lambda_timeout" { + type = number + description = "Lambda timeout in seconds" + default = 30 +} + +variable "max_retry_duration_seconds" { + type = number + description = "Maximum retry window before messages are sent to DLQ" + default = 7200 +} + +variable "sqs_visibility_timeout_seconds" { + type = number + description = "Visibility timeout for the per-client delivery queue" + default = 60 +} + +variable "sqs_max_receive_count" { + type = number + description = "Safety-net maximum receive count before a message moves to DLQ. Supplements the time-based retry window for cases where the Lambda fails before reaching the window check." 
+ default = 100 +} + +variable "enable_xray_tracing" { + type = bool + description = "Enable AWS X-Ray active tracing for the Lambda function" + default = false +} + +variable "mtls_cert_secret_arn" { + type = string + description = "Secrets Manager ARN for the mTLS client certificate" + default = "" +} + +variable "mtls_test_cert_s3_bucket" { + type = string + description = "S3 bucket for dev mTLS test certificates" + default = "" +} + +variable "mtls_test_cert_s3_key" { + type = string + description = "S3 key for dev mTLS test certificate bundle" + default = "" +} + +variable "mtls_test_ca_s3_key" { + type = string + description = "S3 key for dev CA certificate PEM bundle used for server verification" + default = "" +} + +variable "elasticache_endpoint" { + type = string + description = "ElastiCache Serverless endpoint URL" + default = "" +} + +variable "elasticache_cache_name" { + type = string + description = "ElastiCache cache name for SigV4 token presigning" + default = "" +} + +variable "elasticache_iam_username" { + type = string + description = "IAM username for ElastiCache authentication" + default = "" +} + +variable "vpc_subnet_ids" { + type = list(string) + description = "VPC subnet IDs for Lambda execution" + default = [] +} + +variable "lambda_security_group_id" { + type = string + description = "Security group ID for the Lambda function" + default = "" +} diff --git a/infrastructure/terraform/modules/client-destination/README.md b/infrastructure/terraform/modules/client-destination/README.md deleted file mode 100644 index 11b689c3..00000000 --- a/infrastructure/terraform/modules/client-destination/README.md +++ /dev/null @@ -1,32 +0,0 @@ - - - - -## Requirements - -No requirements. 
-## Inputs - -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| [aws\_account\_id](#input\_aws\_account\_id) | Account ID | `string` | n/a | yes | -| [client\_bus\_name](#input\_client\_bus\_name) | EventBus name where you create the rule | `string` | n/a | yes | -| [component](#input\_component) | Component name | `string` | n/a | yes | -| [environment](#input\_environment) | The name of the tfscaffold environment | `string` | n/a | yes | -| [kms\_key\_arn](#input\_kms\_key\_arn) | KMS Key ARN | `string` | n/a | yes | -| [project](#input\_project) | The name of the tfscaffold project | `string` | n/a | yes | -| [region](#input\_region) | AWS Region | `string` | n/a | yes | -| [subscription\_targets](#input\_subscription\_targets) | Flattened subscription-target fanout map keyed by subscription-target composite key |
map(object({
subscription_id = string
target_id = string
}))
| n/a | yes | -| [subscriptions](#input\_subscriptions) | Flattened subscription definitions keyed by subscription\_id |
map(object({
client_id = string
subscription_id = string
target_ids = list(string)
}))
| n/a | yes | -| [targets](#input\_targets) | Flattened target definitions keyed by target\_id |
map(object({
client_id = string
target_id = string
invocation_endpoint = string
invocation_rate_limit_per_second = number
http_method = string
header_name = string
header_value = string
}))
| n/a | yes | -## Modules - -| Name | Source | Version | -|------|--------|---------| -| [target\_dlq](#module\_target\_dlq) | https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-sqs.zip | n/a | -## Outputs - -No outputs. - - - diff --git a/infrastructure/terraform/modules/client-destination/cloudwatch_event_api_destination_this.tf b/infrastructure/terraform/modules/client-destination/cloudwatch_event_api_destination_this.tf deleted file mode 100644 index 4bec92cc..00000000 --- a/infrastructure/terraform/modules/client-destination/cloudwatch_event_api_destination_this.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "aws_cloudwatch_event_api_destination" "per_target" { - for_each = var.targets - - name = "${local.csi}-${each.key}" - description = "API Destination for ${each.key}" - invocation_endpoint = each.value.invocation_endpoint - http_method = each.value.http_method - invocation_rate_limit_per_second = each.value.invocation_rate_limit_per_second - connection_arn = aws_cloudwatch_event_connection.per_target[each.key].arn -} diff --git a/infrastructure/terraform/modules/client-destination/cloudwatch_event_connection_main.tf b/infrastructure/terraform/modules/client-destination/cloudwatch_event_connection_main.tf deleted file mode 100644 index 7546d666..00000000 --- a/infrastructure/terraform/modules/client-destination/cloudwatch_event_connection_main.tf +++ /dev/null @@ -1,14 +0,0 @@ -resource "aws_cloudwatch_event_connection" "per_target" { - for_each = var.targets - - name = "${local.csi}-${each.key}" - description = "Event Connection which would be used by API Destination ${each.key}" - authorization_type = "API_KEY" - - auth_parameters { - api_key { - key = each.value.header_name - value = each.value.header_value - } - } -} diff --git a/infrastructure/terraform/modules/client-destination/cloudwatch_event_rule_main.tf b/infrastructure/terraform/modules/client-destination/cloudwatch_event_rule_main.tf deleted file mode 100644 
index bdf7ea47..00000000 --- a/infrastructure/terraform/modules/client-destination/cloudwatch_event_rule_main.tf +++ /dev/null @@ -1,46 +0,0 @@ -resource "aws_cloudwatch_event_rule" "per_subscription" { - for_each = var.subscriptions - - name = "${local.csi}-${each.key}" - description = "Client Callbacks event rule for subscription ${each.key}" - event_bus_name = var.client_bus_name - - event_pattern = jsonencode({ - "detail" : { - "subscriptions" : [each.value.subscription_id] - } - }) -} - -resource "aws_cloudwatch_event_target" "per_subscription_target" { - for_each = var.subscription_targets - - rule = aws_cloudwatch_event_rule.per_subscription[each.value.subscription_id].name - arn = aws_cloudwatch_event_api_destination.per_target[each.value.target_id].arn - target_id = "${local.csi}-${each.value.target_id}" - role_arn = aws_iam_role.api_target_role.arn - event_bus_name = var.client_bus_name - - dead_letter_config { - arn = module.target_dlq[each.value.target_id].sqs_queue_arn - } - - input_transformer { - input_paths = { - data = "$.detail.payload.data" - } - - input_template = "{\"data\": }" - } - - http_target { - header_parameters = { - "x-hmac-sha256-signature" = "$.detail.signatures.${replace(each.value.target_id, "-", "_")}" - } - } - - retry_policy { - maximum_retry_attempts = 3 - maximum_event_age_in_seconds = 3600 - } -} diff --git a/infrastructure/terraform/modules/client-destination/iam_role_api_target_role.tf b/infrastructure/terraform/modules/client-destination/iam_role_api_target_role.tf deleted file mode 100644 index 1158a2b2..00000000 --- a/infrastructure/terraform/modules/client-destination/iam_role_api_target_role.tf +++ /dev/null @@ -1,83 +0,0 @@ -resource "aws_iam_role" "api_target_role" { - name = "${local.csi}-api-target-target-role" - description = "Role for client target rule" - assume_role_policy = data.aws_iam_policy_document.api_target_role_assume_role_policy.json -} - -data "aws_iam_policy_document" 
"api_target_role_assume_role_policy" { - statement { - actions = [ - "sts:AssumeRole" - ] - - principals { - type = "Service" - identifiers = ["events.amazonaws.com"] - } - } -} - -resource "aws_iam_role_policy_attachment" "api_target_role" { - role = aws_iam_role.api_target_role.id - policy_arn = aws_iam_policy.api_target_role.arn -} - -resource "aws_iam_policy" "api_target_role" { - name = "${local.csi}-api-target-role-policy" - description = "IAM Policy for the client target role" - path = "/" - policy = data.aws_iam_policy_document.api_target_role.json -} - -data "aws_iam_policy_document" "api_target_role" { - dynamic "statement" { - for_each = length(aws_cloudwatch_event_api_destination.per_target) > 0 ? [1] : [] - content { - sid = "AllowAPIDestinationAccess" - effect = "Allow" - - actions = [ - "events:InvokeApiDestination", - ] - - resources = [ - for destination in aws_cloudwatch_event_api_destination.per_target : - destination.arn - ] - } - } - - dynamic "statement" { - for_each = length(module.target_dlq) > 0 ? 
[1] : [] - content { - sid = "AllowSQSSendMessageForDLQ" - effect = "Allow" - - actions = [ - "sqs:SendMessage", - ] - - resources = [ - for dlq in module.target_dlq : - dlq.sqs_queue_arn - ] - } - } - - statement { - sid = "AllowKMSForDLQ" - effect = "Allow" - - actions = [ - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:Encrypt", - "kms:DescribeKey", - "kms:Decrypt" - ] - - resources = [ - var.kms_key_arn, - ] - } -} diff --git a/infrastructure/terraform/modules/client-destination/locals.tf b/infrastructure/terraform/modules/client-destination/locals.tf deleted file mode 100644 index fe672990..00000000 --- a/infrastructure/terraform/modules/client-destination/locals.tf +++ /dev/null @@ -1,12 +0,0 @@ -locals { - csi = replace( - format( - "%s-%s-%s", - var.project, - var.environment, - var.component, - ), - "_", - "", - ) -} diff --git a/infrastructure/terraform/modules/client-destination/module_target_dlq.tf b/infrastructure/terraform/modules/client-destination/module_target_dlq.tf deleted file mode 100644 index 36c4c277..00000000 --- a/infrastructure/terraform/modules/client-destination/module_target_dlq.tf +++ /dev/null @@ -1,41 +0,0 @@ -module "target_dlq" { - source = "https://github.com/NHSDigital/nhs-notify-shared-modules/releases/download/3.0.7/terraform-sqs.zip" - for_each = var.targets - - aws_account_id = var.aws_account_id - component = var.component - environment = var.environment - project = var.project - region = var.region - name = "${each.key}-dlq" - - sqs_kms_key_arn = var.kms_key_arn - - visibility_timeout_seconds = 60 - - create_dlq = false - - sqs_policy_overload = data.aws_iam_policy_document.target_dlq[each.key].json -} - -data "aws_iam_policy_document" "target_dlq" { - for_each = var.targets - - statement { - sid = "AllowEventBridgeToSendMessage" - effect = "Allow" - - principals { - type = "Service" - identifiers = ["events.amazonaws.com"] - } - - actions = [ - "sqs:SendMessage" - ] - - resources = [ - 
"arn:aws:sqs:${var.region}:${var.aws_account_id}:${var.project}-${var.environment}-${var.component}-${each.key}-dlq-queue" - ] - } -} diff --git a/infrastructure/terraform/modules/client-destination/variables.tf b/infrastructure/terraform/modules/client-destination/variables.tf deleted file mode 100644 index 2b9a0ceb..00000000 --- a/infrastructure/terraform/modules/client-destination/variables.tf +++ /dev/null @@ -1,67 +0,0 @@ -variable "project" { - type = string - description = "The name of the tfscaffold project" -} - -variable "environment" { - type = string - description = "The name of the tfscaffold environment" -} - -variable "component" { - type = string - description = "Component name" -} - -variable "aws_account_id" { - type = string - description = "Account ID" -} - -variable "region" { - type = string - description = "AWS Region" -} - -variable "targets" { - type = map(object({ - client_id = string - target_id = string - invocation_endpoint = string - invocation_rate_limit_per_second = number - http_method = string - header_name = string - header_value = string - })) - - description = "Flattened target definitions keyed by target_id" -} - -variable "subscriptions" { - type = map(object({ - client_id = string - subscription_id = string - target_ids = list(string) - })) - - description = "Flattened subscription definitions keyed by subscription_id" -} - -variable "subscription_targets" { - type = map(object({ - subscription_id = string - target_id = string - })) - - description = "Flattened subscription-target fanout map keyed by subscription-target composite key" -} - -variable "client_bus_name" { - type = string - description = "EventBus name where you create the rule" -} - -variable "kms_key_arn" { - type = string - description = "KMS Key ARN" -} diff --git a/knip.ts b/knip.ts index 3dd626cb..f8612f70 100644 --- a/knip.ts +++ b/knip.ts @@ -18,7 +18,7 @@ const config: KnipConfig = { // ESLint peer deps – referenced indirectly through plugin configs 
"@stylistic/eslint-plugin", "@typescript-eslint/parser", - // Used in lambdas' lambda-build npm script via pnpm exec + // Used in lambdas' lambda-build script via pnpm exec "esbuild", // Used in scripts/tests/unit.sh (shell script, not scanned by knip) "lcov-result-merger", @@ -32,9 +32,21 @@ const config: KnipConfig = { // Resolved transitively through tsconfig.base.json → @tsconfig/node22 ignoreDependencies: ["@tsconfig/node22"], }, + "lambdas/https-client-lambda": { + ignoreDependencies: ["@tsconfig/node22"], + }, "lambdas/mock-webhook-lambda": { ignoreDependencies: ["@tsconfig/node22"], }, + "lambdas/perf-runner-lambda": { + ignoreDependencies: ["@tsconfig/node22", "@types/aws-lambda"], + }, + "src/config-cache": { + ignoreDependencies: ["@tsconfig/node22"], + }, + "src/config-subscription-cache": { + ignoreDependencies: ["@tsconfig/node22"], + }, "src/logger": { ignoreDependencies: ["@tsconfig/node22"], }, @@ -42,6 +54,7 @@ const config: KnipConfig = { ignoreDependencies: ["@tsconfig/node22"], }, "tests/integration": { + entry: ["helpers/**/*.ts"], ignoreDependencies: [ "@tsconfig/node22", // Used in helpers/sqs.ts and helpers/cloudwatch.ts; flagged because @@ -49,9 +62,6 @@ const config: KnipConfig = { "async-wait-until", ], }, - "tests/performance": { - ignoreDependencies: ["@tsconfig/node22"], - }, "tests/test-support": { ignoreDependencies: ["@tsconfig/node22"], }, diff --git a/lambdas/client-transform-filter-lambda/package.json b/lambdas/client-transform-filter-lambda/package.json index 266911da..668250b4 100644 --- a/lambdas/client-transform-filter-lambda/package.json +++ b/lambdas/client-transform-filter-lambda/package.json @@ -1,7 +1,7 @@ { "dependencies": { "@aws-sdk/client-s3": "catalog:aws", - "@aws-sdk/client-ssm": "catalog:aws", + "@nhs-notify-client-callbacks/config-subscription-cache": "workspace:*", "@nhs-notify-client-callbacks/logger": "workspace:*", "@nhs-notify-client-callbacks/models": "workspace:*", "aws-embedded-metrics": "catalog:app", 
diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/index.component.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/index.component.test.ts index b46c49f8..b234a244 100644 --- a/lambdas/client-transform-filter-lambda/src/__tests__/index.component.test.ts +++ b/lambdas/client-transform-filter-lambda/src/__tests__/index.component.test.ts @@ -15,26 +15,12 @@ jest.mock("@aws-sdk/client-s3", () => { }; }); -const mockSsmSend = jest.fn(); -jest.mock("@aws-sdk/client-ssm", () => { - const actual = jest.requireActual("@aws-sdk/client-ssm"); - return { - ...actual, - SSMClient: jest.fn().mockImplementation(() => ({ - send: mockSsmSend, - })), - }; -}); - -// Set environment variables before importing the handler/module under test so that -// services constructed at module import time (e.g. applicationsMapService) see -// the correct configuration. +// Set environment variables before importing the handler/module under test. process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET = "test-bucket"; process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX = "client_subscriptions/"; process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS = "60"; process.env.METRICS_NAMESPACE = "test-namespace"; process.env.ENVIRONMENT = "test"; -process.env.APPLICATIONS_MAP_PARAMETER = "/test/applications-map"; jest.mock("aws-embedded-metrics", () => ({ createMetricsLogger: jest.fn(() => ({ @@ -50,12 +36,11 @@ jest.mock("aws-embedded-metrics", () => ({ })); import { GetObjectCommand, NoSuchKey } from "@aws-sdk/client-s3"; -import { GetParameterCommand } from "@aws-sdk/client-ssm"; import type { SQSRecord } from "aws-lambda"; import { EventTypes } from "@nhs-notify-client-callbacks/models"; import { createMessageStatusConfig } from "__tests__/helpers/client-subscription-fixtures"; import { createS3Client } from "services/config-loader-service"; -import { applicationsMapService, configLoaderService, handler } from ".."; +import { configLoaderService, handler } from ".."; const makeSqsRecord = 
(body: object): SQSRecord => ({ messageId: "sqs-id", @@ -104,18 +89,8 @@ const validMessageStatusEvent = (clientId: string, messageStatus: string) => ({ }); describe("Lambda handler with S3 subscription filtering", () => { - const applicationsMap = JSON.stringify({ - "client-1": "app-id-1", - "client-a": "app-id-a", - "client-b": "app-id-b", - "client-no-config": "app-id-no-config", - }); - beforeEach(() => { mockSend.mockClear(); - mockSsmSend.mockClear(); - applicationsMapService.reset(); - mockSsmSend.mockResolvedValue({ Parameter: { Value: applicationsMap } }); // Reset loader and clear cache for clean state between tests configLoaderService.reset( createS3Client({ AWS_ENDPOINT_URL: "http://localhost:4566" }), @@ -129,7 +104,6 @@ describe("Lambda handler with S3 subscription filtering", () => { delete process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS; delete process.env.METRICS_NAMESPACE; delete process.env.ENVIRONMENT; - delete process.env.APPLICATIONS_MAP_PARAMETER; }); it("passes event through when client config matches subscription", async () => { @@ -148,12 +122,8 @@ describe("Lambda handler with S3 subscription filtering", () => { expect(result).toHaveLength(1); expect(mockSend).toHaveBeenCalledTimes(1); expect(mockSend.mock.calls[0][0]).toBeInstanceOf(GetObjectCommand); - expect(mockSsmSend).toHaveBeenCalledTimes(1); - expect(mockSsmSend.mock.calls[0][0]).toBeInstanceOf(GetParameterCommand); expect(result[0]).toHaveProperty("payload"); expect(result[0]).toHaveProperty("subscriptions"); - expect(result[0]).toHaveProperty("signatures"); - expect(Object.values(result[0].signatures)[0]).toMatch(/^[0-9a-f]+$/); }); it("filters out event when status is not in subscription", async () => { @@ -251,25 +221,4 @@ describe("Lambda handler with S3 subscription filtering", () => { // S3 fetched once per distinct client (client-a and client-b), not once per event expect(mockSend).toHaveBeenCalledTimes(2); }); - - it("filters out event when no applicationId found in SSM 
map", async () => { - mockSend.mockResolvedValue({ - Body: { - transformToString: jest - .fn() - .mockResolvedValue( - JSON.stringify(createValidConfig("client-unknown")), - ), - }, - }); - mockSsmSend.mockResolvedValue({ - Parameter: { Value: JSON.stringify({}) }, - }); - - const result = await handler([ - makeSqsRecord(validMessageStatusEvent("client-unknown", "DELIVERED")), - ]); - - expect(result).toHaveLength(0); - }); }); diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/index.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/index.test.ts index 14b10096..168d128d 100644 --- a/lambdas/client-transform-filter-lambda/src/__tests__/index.test.ts +++ b/lambdas/client-transform-filter-lambda/src/__tests__/index.test.ts @@ -10,7 +10,6 @@ import type { import type { Logger } from "services/logger"; import type { CallbackMetrics } from "services/metrics"; import type { ConfigLoader } from "services/config-loader"; -import type { ApplicationsMapService } from "services/ssm-applications-map"; import { ObservabilityService } from "services/observability"; import { ConfigLoaderService } from "services/config-loader-service"; import { @@ -71,15 +70,6 @@ const makeStubConfigLoaderService = (): ConfigLoaderService => { return { getLoader: () => loader } as unknown as ConfigLoaderService; }; -const makeStubApplicationsMapService = (): ApplicationsMapService => - ({ - getApplicationId: jest - .fn() - .mockImplementation( - async (clientId: string) => `test-app-id-${clientId}`, - ), - }) as unknown as ApplicationsMapService; - describe("Lambda handler", () => { const mockLogger = { info: jest.fn(), @@ -109,7 +99,6 @@ describe("Lambda handler", () => { createObservabilityService: () => new ObservabilityService(mockLogger, mockMetrics, mockMetricsLogger), createConfigLoaderService: makeStubConfigLoaderService, - createApplicationsMapService: makeStubApplicationsMapService, }); beforeEach(() => { @@ -173,7 +162,6 @@ describe("Lambda handler", () => { 
expect(result).toHaveLength(1); expect(result[0]).toHaveProperty("payload"); expect(result[0]).toHaveProperty("subscriptions"); - expect(result[0]).toHaveProperty("signatures"); const dataItem = result[0].payload.data[0]; expect(dataItem.type).toBe("MessageStatus"); expect((dataItem.attributes as MessageStatusAttributes).messageStatus).toBe( @@ -203,7 +191,6 @@ describe("Lambda handler", () => { new ObservabilityService(mockLogger, mockMetrics, mockMetricsLogger), createConfigLoaderService: () => ({ getLoader: () => customConfigLoader }) as ConfigLoaderService, - createApplicationsMapService: makeStubApplicationsMapService, }); const sqsMessage: SQSRecord = { @@ -234,65 +221,6 @@ describe("Lambda handler", () => { ); }); - it("should throw when any target is missing an apiKey", async () => { - const customConfigLoader = { - loadClientConfig: jest.fn().mockResolvedValue( - createClientSubscriptionConfig("client-abc-123", { - subscriptions: [ - createMessageStatusSubscription(["DELIVERED"], { - targetIds: ["target-no-key", DEFAULT_TARGET_ID], - }), - ], - targets: [ - createTarget({ - targetId: "target-no-key", - apiKey: undefined as unknown as { - headerName: string; - headerValue: string; - }, - }), - createTarget({ - targetId: DEFAULT_TARGET_ID, - apiKey: { - headerName: "x-api-key", - headerValue: "valid-key", - }, - }), - ], - }), - ), - } as unknown as ConfigLoader; - - const handlerWithMixedTargets = createHandler({ - createObservabilityService: () => - new ObservabilityService(mockLogger, mockMetrics, mockMetricsLogger), - createConfigLoaderService: () => - ({ getLoader: () => customConfigLoader }) as ConfigLoaderService, - createApplicationsMapService: makeStubApplicationsMapService, - }); - - const sqsMessage: SQSRecord = { - messageId: "sqs-msg-id-mixed", - receiptHandle: "receipt-handle-mixed", - body: JSON.stringify(validMessageStatusEvent), - attributes: { - ApproximateReceiveCount: "1", - SentTimestamp: "1519211230", - SenderId: "ABCDEFGHIJ", - 
ApproximateFirstReceiveTimestamp: "1519211230", - }, - messageAttributes: {}, - md5OfBody: "mock-md5", - eventSource: "aws:sqs", - eventSourceARN: "arn:aws:sqs:eu-west-2:123456789:mock-queue", - awsRegion: "eu-west-2", - }; - - await expect(handlerWithMixedTargets([sqsMessage])).rejects.toThrow( - "Missing apiKey for target target-no-key", - ); - }); - it("should handle batch of SQS messages from EventBridge Pipes", async () => { const sqsMessages: SQSRecord[] = [ { @@ -414,7 +342,6 @@ describe("Lambda handler", () => { expect(result).toHaveLength(1); expect(result[0]).toHaveProperty("payload"); expect(result[0]).toHaveProperty("subscriptions"); - expect(result[0]).toHaveProperty("signatures"); const dataItem = result[0].payload.data[0]; expect(dataItem.type).toBe("ChannelStatus"); expect((dataItem.attributes as ChannelStatusAttributes).channelStatus).toBe( @@ -481,7 +408,6 @@ describe("Lambda handler", () => { const faultyHandler = createHandler({ createObservabilityService: () => faultyObservability, createConfigLoaderService: makeStubConfigLoaderService, - createApplicationsMapService: makeStubApplicationsMapService, }); const sqsMessage: SQSRecord = { @@ -662,7 +588,6 @@ describe("createHandler default wiring", () => { [], state.mockObservabilityInstance, expect.any(Object), - expect.any(Object), ); expect(result).toEqual(["ok"]); diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-cache.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-cache.test.ts deleted file mode 100644 index 6199b92c..00000000 --- a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-cache.test.ts +++ /dev/null @@ -1,66 +0,0 @@ -import type { ClientSubscriptionConfiguration } from "@nhs-notify-client-callbacks/models"; -import { - createClientSubscriptionConfig, - createMessageStatusSubscription, -} from "__tests__/helpers/client-subscription-fixtures"; -import { ConfigCache } from "services/config-cache"; - 
-const createConfig = (): ClientSubscriptionConfiguration => - createClientSubscriptionConfig("client-1", { - subscriptions: [ - createMessageStatusSubscription(["DELIVERED"], { targetIds: [] }), - ], - }); - -describe("ConfigCache", () => { - it("stores and retrieves configuration", () => { - const cache = new ConfigCache(60_000); - const config = createConfig(); - - cache.set("client-1", config); - - expect(cache.get("client-1")).toEqual(config); - }); - - it("returns undefined for non-existent key", () => { - const cache = new ConfigCache(60_000); - const result = cache.get("non-existent"); - - expect(result).toBeUndefined(); - }); - - it("returns undefined for expired entries", () => { - jest.useFakeTimers(); - jest.setSystemTime(new Date("2025-01-01T10:00:00Z")); - - const cache = new ConfigCache(1000); // 1 second TTL - const config = createConfig(); - - cache.set("client-1", config); - expect(cache.get("client-1")).toEqual(config); - - jest.advanceTimersByTime(1001); - - const result = cache.get("client-1"); - - expect(result).toBeUndefined(); - - jest.useRealTimers(); - }); - - it("clears all entries", () => { - const cache = new ConfigCache(60_000); - const config = createConfig(); - - cache.set("client-1", config); - cache.set("client-2", config); - - expect(cache.get("client-1")).toEqual(config); - expect(cache.get("client-2")).toEqual(config); - - cache.clear(); - - expect(cache.get("client-1")).toBeUndefined(); - expect(cache.get("client-2")).toBeUndefined(); - }); -}); diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader-service.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader-service.test.ts index a5741d2b..c907bb3f 100644 --- a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader-service.test.ts +++ b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader-service.test.ts @@ -1,4 +1,5 @@ import { S3Client } from "@aws-sdk/client-s3"; 
+import { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; import { ConfigLoader } from "services/config-loader"; import { ConfigLoaderService, @@ -8,6 +9,7 @@ import { const mockS3Client = jest.mocked(S3Client); const mockConfigLoader = jest.mocked(ConfigLoader); +const mockConfigSubscriptionCache = jest.mocked(ConfigSubscriptionCache); jest.mock("@aws-sdk/client-s3", () => ({ S3Client: jest.fn(), @@ -17,12 +19,19 @@ jest.mock("services/config-loader", () => ({ ConfigLoader: jest.fn(), })); +jest.mock("@nhs-notify-client-callbacks/config-subscription-cache", () => ({ + ConfigSubscriptionCache: jest.fn().mockImplementation(() => ({ + reset: jest.fn(), + })), +})); + describe("ConfigLoaderService", () => { const originalBucket = process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET; const originalPrefix = process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX; beforeEach(() => { mockConfigLoader.mockClear(); + mockConfigSubscriptionCache.mockClear(); process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET = "test-bucket"; }); @@ -60,7 +69,7 @@ describe("ConfigLoaderService", () => { delete process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX; const service = new ConfigLoaderService(); service.getLoader(); - expect(mockConfigLoader).toHaveBeenCalledWith( + expect(mockConfigSubscriptionCache).toHaveBeenCalledWith( expect.objectContaining({ keyPrefix: "client_subscriptions/" }), ); }); @@ -69,7 +78,7 @@ describe("ConfigLoaderService", () => { process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX = "custom_prefix/"; const service = new ConfigLoaderService(); service.getLoader(); - expect(mockConfigLoader).toHaveBeenCalledWith( + expect(mockConfigSubscriptionCache).toHaveBeenCalledWith( expect.objectContaining({ keyPrefix: "custom_prefix/" }), ); }); @@ -90,7 +99,6 @@ describe("ConfigLoaderService", () => { }); const service = new ConfigLoaderService(); service.reset(customClient); - // Should not throw and the loader should be available immediately expect(() => 
service.getLoader()).not.toThrow(); }); @@ -101,7 +109,7 @@ describe("ConfigLoaderService", () => { }); const service = new ConfigLoaderService(); service.reset(customClient); - expect(mockConfigLoader).toHaveBeenCalledWith( + expect(mockConfigSubscriptionCache).toHaveBeenCalledWith( expect.objectContaining({ keyPrefix: "custom_prefix/" }), ); }); diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader.test.ts index 495164fb..c9fecdce 100644 --- a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader.test.ts +++ b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-loader.test.ts @@ -1,6 +1,6 @@ import { GetObjectCommand, NoSuchKey, S3Client } from "@aws-sdk/client-s3"; import { createMessageStatusConfig } from "__tests__/helpers/client-subscription-fixtures"; -import { ConfigCache } from "services/config-cache"; +import { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; import { ConfigLoader } from "services/config-loader"; import { ConfigValidationError } from "services/validators/config-validator"; @@ -13,6 +13,15 @@ jest.mock("services/logger", () => ({ }, })); +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + debug: jest.fn(), + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + }, +})); + const mockBody = (json: string) => ({ transformToString: jest.fn().mockResolvedValue(json), }); @@ -20,13 +29,15 @@ const mockBody = (json: string) => ({ const createValidConfig = (clientId: string) => createMessageStatusConfig(["DELIVERED"], clientId); -const createLoader = (send: jest.Mock) => - new ConfigLoader({ +const createLoader = (send: jest.Mock) => { + const cache = new ConfigSubscriptionCache({ + s3Client: { send } as unknown as S3Client, bucketName: "bucket", keyPrefix: "client_subscriptions/", - s3Client: { send } as unknown as S3Client, 
- cache: new ConfigCache(60_000), + ttlMs: 60_000, }); + return new ConfigLoader(cache); +}; describe("ConfigLoader", () => { it("loads and validates client configuration from S3", async () => { diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-update.component.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-update.component.test.ts index 81af7f04..487e6130 100644 --- a/lambdas/client-transform-filter-lambda/src/__tests__/services/config-update.component.test.ts +++ b/lambdas/client-transform-filter-lambda/src/__tests__/services/config-update.component.test.ts @@ -1,8 +1,17 @@ import { S3Client } from "@aws-sdk/client-s3"; import { createMessageStatusConfig } from "__tests__/helpers/client-subscription-fixtures"; -import { ConfigCache } from "services/config-cache"; +import { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; import { ConfigLoader } from "services/config-loader"; +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + debug: jest.fn(), + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + }, +})); + const makeConfig = (messageStatuses: string[]) => createMessageStatusConfig(messageStatuses as never); @@ -28,12 +37,13 @@ describe("config update component", () => { }, }); - const loader = new ConfigLoader({ + const cache = new ConfigSubscriptionCache({ + s3Client: { send } as unknown as S3Client, bucketName: "bucket", keyPrefix: "client_subscriptions/", - s3Client: { send } as unknown as S3Client, - cache: new ConfigCache(1000), + ttlMs: 1000, }); + const loader = new ConfigLoader(cache); const first = await loader.loadClientConfig("client-1"); const firstMessage = first?.subscriptions.find( diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/services/payload-signer.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/services/payload-signer.test.ts deleted file mode 100644 index e1785d55..00000000 --- 
a/lambdas/client-transform-filter-lambda/src/__tests__/services/payload-signer.test.ts +++ /dev/null @@ -1,49 +0,0 @@ -import { createHmac } from "node:crypto"; -import type { ClientCallbackPayload } from "@nhs-notify-client-callbacks/models"; -import { signPayload } from "services/payload-signer"; - -const makePayload = (id = "msg-1") => - ({ data: [{ id }] }) as unknown as ClientCallbackPayload; - -describe("signPayload", () => { - it("produces the expected HMAC-SHA256 hex string", () => { - const payload = makePayload(); - const applicationId = "app-id-1"; - const apiKey = "api-key-1"; - - const expected = createHmac("sha256", `${applicationId}.${apiKey}`) - .update(JSON.stringify(payload)) - .digest("hex"); - - expect(signPayload(payload, applicationId, apiKey)).toBe(expected); - }); - - it("returns a non-empty hex string", () => { - const result = signPayload(makePayload(), "app-id", "api-key"); - expect(result).toMatch(/^[0-9a-f]+$/); - }); - - it("produces different signatures for different payloads", () => { - const apiKey = "key"; - const appId = "app"; - expect(signPayload(makePayload("msg-1"), appId, apiKey)).not.toBe( - signPayload(makePayload("msg-2"), appId, apiKey), - ); - }); - - it("produces different signatures for different applicationIds", () => { - const payload = makePayload(); - const apiKey = "key"; - expect(signPayload(payload, "app-1", apiKey)).not.toBe( - signPayload(payload, "app-2", apiKey), - ); - }); - - it("produces different signatures for different apiKeys", () => { - const payload = makePayload(); - const appId = "app"; - expect(signPayload(payload, appId, "key-1")).not.toBe( - signPayload(payload, appId, "key-2"), - ); - }); -}); diff --git a/lambdas/client-transform-filter-lambda/src/__tests__/services/ssm-applications-map.test.ts b/lambdas/client-transform-filter-lambda/src/__tests__/services/ssm-applications-map.test.ts deleted file mode 100644 index 7123009a..00000000 --- 
a/lambdas/client-transform-filter-lambda/src/__tests__/services/ssm-applications-map.test.ts +++ /dev/null @@ -1,156 +0,0 @@ -import { GetParameterCommand, SSMClient } from "@aws-sdk/client-ssm"; -import { - ApplicationsMapService, - createSsmClient, - resolveCacheTtlMs, -} from "services/ssm-applications-map"; - -jest.mock("services/logger", () => ({ - logger: { - debug: jest.fn(), - info: jest.fn(), - warn: jest.fn(), - error: jest.fn(), - }, -})); - -const makeSsmClient = (value: string | undefined) => - ({ - send: jest - .fn() - .mockResolvedValue( - value === undefined ? {} : { Parameter: { Value: value } }, - ), - }) as unknown as SSMClient; - -describe("ApplicationsMapService", () => { - beforeEach(() => { - jest.useFakeTimers(); - }); - - afterEach(() => { - jest.useRealTimers(); - }); - - it("returns the applicationId for a known clientId", async () => { - const ssmClient = makeSsmClient( - JSON.stringify({ "client-1": "app-id-1", "client-2": "app-id-2" }), - ); - const service = new ApplicationsMapService(ssmClient, "/test/param"); - - expect(await service.getApplicationId("client-1")).toBe("app-id-1"); - }); - - it("returns undefined for an unknown clientId", async () => { - const ssmClient = makeSsmClient(JSON.stringify({ "client-1": "app-id-1" })); - const service = new ApplicationsMapService(ssmClient, "/test/param"); - - expect(await service.getApplicationId("unknown")).toBeUndefined(); - }); - - it("loads from SSM and sends GetParameterCommand with WithDecryption", async () => { - const ssmClient = makeSsmClient(JSON.stringify({ "client-1": "app-id-1" })); - const service = new ApplicationsMapService(ssmClient, "/test/param"); - - await service.getApplicationId("client-1"); - - expect(ssmClient.send).toHaveBeenCalledTimes(1); - expect((ssmClient.send as jest.Mock).mock.calls[0][0]).toBeInstanceOf( - GetParameterCommand, - ); - }); - - it("caches the map and does not call SSM again within TTL", async () => { - const ssmClient = 
makeSsmClient(JSON.stringify({ "client-1": "app-id-1" })); - const service = new ApplicationsMapService(ssmClient, "/test/param", 5000); - - await service.getApplicationId("client-1"); - await service.getApplicationId("client-1"); - - expect(ssmClient.send).toHaveBeenCalledTimes(1); - }); - - it("reloads from SSM after TTL expires", async () => { - const ssmClient = makeSsmClient(JSON.stringify({ "client-1": "app-id-1" })); - const service = new ApplicationsMapService(ssmClient, "/test/param", 5000); - - await service.getApplicationId("client-1"); - jest.advanceTimersByTime(6000); - await service.getApplicationId("client-1"); - - expect(ssmClient.send).toHaveBeenCalledTimes(2); - }); - - it("throws when SSM parameter is missing", async () => { - const ssmClient = makeSsmClient(undefined); - const service = new ApplicationsMapService(ssmClient, "/test/param"); - - await expect(service.getApplicationId("client-1")).rejects.toThrow( - "SSM parameter '/test/param' not found or has no value", - ); - }); - - it("throws when APPLICATIONS_MAP_PARAMETER is not set", async () => { - const ssmClient = makeSsmClient(JSON.stringify({ "client-1": "app-id-1" })); - const service = new ApplicationsMapService(ssmClient, undefined); - - await expect(service.getApplicationId("client-1")).rejects.toThrow( - "APPLICATIONS_MAP_PARAMETER is required", - ); - }); - - it("throws when SSM parameter has empty value", async () => { - const ssmClient = { - send: jest.fn().mockResolvedValue({ Parameter: { Value: "" } }), - } as unknown as SSMClient; - const service = new ApplicationsMapService(ssmClient, "/test/param"); - - await expect(service.getApplicationId("client-1")).rejects.toThrow( - "SSM parameter '/test/param' not found or has no value", - ); - }); - - it("throws when SSM parameter contains invalid JSON", async () => { - const ssmClient = makeSsmClient("not valid json"); - const service = new ApplicationsMapService(ssmClient, "/test/param"); - - await 
expect(service.getApplicationId("client-1")).rejects.toThrow( - "SSM parameter '/test/param' contains invalid JSON", - ); - }); - - it("reset clears the cache and forces reload on next call", async () => { - const ssmClient = makeSsmClient(JSON.stringify({ "client-1": "app-id-1" })); - const service = new ApplicationsMapService(ssmClient, "/test/param", 5000); - - await service.getApplicationId("client-1"); - service.reset(); - await service.getApplicationId("client-1"); - - expect(ssmClient.send).toHaveBeenCalledTimes(2); - }); -}); - -describe("resolveCacheTtlMs", () => { - it("returns configured value in ms", () => { - expect( - resolveCacheTtlMs({ APPLICATIONS_MAP_CACHE_TTL_SECONDS: "30" }), - ).toBe(30_000); - }); - - it("returns default when env var is absent", () => { - expect(resolveCacheTtlMs({})).toBe(60_000); - }); - - it("returns default when env var is not a valid number", () => { - expect( - resolveCacheTtlMs({ APPLICATIONS_MAP_CACHE_TTL_SECONDS: "invalid" }), - ).toBe(60_000); - }); -}); - -describe("createSsmClient", () => { - it("returns an SSMClient instance", () => { - expect(createSsmClient({})).toBeInstanceOf(SSMClient); - }); -}); diff --git a/lambdas/client-transform-filter-lambda/src/handler.ts b/lambdas/client-transform-filter-lambda/src/handler.ts index 0d1f20b6..be05991c 100644 --- a/lambdas/client-transform-filter-lambda/src/handler.ts +++ b/lambdas/client-transform-filter-lambda/src/handler.ts @@ -7,13 +7,11 @@ import type { } from "@nhs-notify-client-callbacks/models"; import { validateStatusPublishEvent } from "services/validators/event-validator"; import { transformEvent } from "services/transformers/event-transformer"; -import { extractCorrelationId, logger } from "services/logger"; +import { extractCorrelationId } from "services/logger"; import { ValidationError, getEventError } from "services/error-handler"; import type { ObservabilityService } from "services/observability"; import type { ConfigLoader } from 
"services/config-loader"; import { evaluateSubscriptionFilters } from "services/subscription-filter"; -import type { ApplicationsMapService } from "services/ssm-applications-map"; -import { signPayload } from "services/payload-signer"; const BATCH_CONCURRENCY = Number(process.env.BATCH_CONCURRENCY) || 10; const MESSAGE_ROOT_URI = process.env.MESSAGE_ROOT_URI ?? ""; @@ -27,20 +25,9 @@ type FilteredEvent = UnsignedEvent & { targetIds: string[]; }; -type SignedEvent = { - transformedEvent: TransformedEvent; - deliveryContext: { - correlationId: string; - eventType: string; - clientId: string; - messageId: string; - }; -}; - export interface TransformedEvent { payload: ClientCallbackPayload; subscriptions: string[]; - signatures: Record; } class BatchStats { @@ -140,79 +127,6 @@ function processSingleEvent( type ClientConfigMap = Map; -async function signBatch( - filteredEvents: FilteredEvent[], - applicationsMapService: ApplicationsMapService, - configByClientId: ClientConfigMap, - stats: BatchStats, - observability: ObservabilityService, -): Promise { - const results = await pMap( - filteredEvents, - async (event): Promise => { - const { clientId } = event.data; - const correlationId = extractCorrelationId(event) ?? event.id; - - const applicationId = - await applicationsMapService.getApplicationId(clientId); - if (!applicationId) { - stats.recordFiltered(); - logger.warn( - "No applicationId found in SSM map - event will not be delivered", - { clientId, correlationId }, - ); - return undefined; - } - - const clientConfig = configByClientId.get(clientId); - const targetsById = new Map( - (clientConfig?.targets ?? 
[]).map((t) => [t.targetId, t]), - ); - - const signaturesByTarget = new Map(); - - for (const targetId of event.targetIds) { - const target = targetsById.get(targetId); - const apiKey = target?.apiKey?.headerValue; - if (!apiKey) { - throw new ValidationError( - `Missing apiKey for target ${targetId}`, - correlationId, - ); - } - const signature = signPayload( - event.transformedPayload, - applicationId, - apiKey, - ); - signaturesByTarget.set(targetId.replaceAll("-", "_"), signature); - observability.recordCallbackSigned( - event.transformedPayload, - correlationId, - clientId, - signature, - ); - } - - return { - transformedEvent: { - payload: event.transformedPayload, - subscriptions: event.subscriptionIds, - signatures: Object.fromEntries(signaturesByTarget), - }, - deliveryContext: { - correlationId, - eventType: event.type, - clientId, - messageId: event.data.messageId, - }, - }; - }, - { concurrency: BATCH_CONCURRENCY }, - ); - return results.filter((e): e is SignedEvent => e !== undefined); -} - async function loadClientConfigs( events: UnsignedEvent[], configLoader: ConfigLoader, @@ -304,7 +218,6 @@ export async function processEvents( event: SQSRecord[], observability: ObservabilityService, configLoader: ConfigLoader, - applicationsMapService: ApplicationsMapService, ): Promise { const startTime = Date.now(); const stats = new BatchStats(); @@ -324,20 +237,21 @@ export async function processEvents( stats, ); - const signedEvents = await signBatch( - filteredEvents, - applicationsMapService, - configByClientId, - stats, - observability, - ); - - for (const signedEvent of signedEvents) { - observability.recordDeliveryInitiated(signedEvent.deliveryContext); - } + const deliverableEvents: TransformedEvent[] = filteredEvents.map( + (filteredEvent) => { + const correlationId = extractCorrelationId(filteredEvent); + observability.recordDeliveryInitiated({ + correlationId, + eventType: filteredEvent.type, + clientId: filteredEvent.data.clientId, + messageId: 
filteredEvent.data.messageId, + }); - const deliverableEvents = signedEvents.map( - (signedEvent) => signedEvent.transformedEvent, + return { + payload: filteredEvent.transformedPayload, + subscriptions: filteredEvent.subscriptionIds, + }; + }, ); const processingTime = Date.now() - startTime; diff --git a/lambdas/client-transform-filter-lambda/src/index.ts b/lambdas/client-transform-filter-lambda/src/index.ts index 9d631bfe..5ef8e197 100644 --- a/lambdas/client-transform-filter-lambda/src/index.ts +++ b/lambdas/client-transform-filter-lambda/src/index.ts @@ -3,17 +3,13 @@ import { Logger } from "services/logger"; import { CallbackMetrics, createMetricLogger } from "services/metrics"; import { ObservabilityService } from "services/observability"; import { ConfigLoaderService } from "services/config-loader-service"; -import { ApplicationsMapService } from "services/ssm-applications-map"; import { type TransformedEvent, processEvents } from "handler"; export const configLoaderService = new ConfigLoaderService(); -export const applicationsMapService = new ApplicationsMapService(); - export interface HandlerDependencies { createObservabilityService?: () => ObservabilityService; createConfigLoaderService?: () => ConfigLoaderService; - createApplicationsMapService?: () => ApplicationsMapService; } function createDefaultObservabilityService(): ObservabilityService { @@ -28,10 +24,6 @@ function createDefaultConfigLoaderService(): ConfigLoaderService { return configLoaderService; } -function createDefaultApplicationsMapService(): ApplicationsMapService { - return applicationsMapService; -} - export function createHandler( dependencies: Partial = {}, ): (event: SQSRecord[]) => Promise { @@ -41,19 +33,10 @@ export function createHandler( const configLoader = ( dependencies.createConfigLoaderService ?? createDefaultConfigLoaderService )(); - const applicationsMap = ( - dependencies.createApplicationsMapService ?? 
- createDefaultApplicationsMapService - )(); return async (event: SQSRecord[]): Promise => { const observability = createObservabilityService(); - return processEvents( - event, - observability, - configLoader.getLoader(), - applicationsMap, - ); + return processEvents(event, observability, configLoader.getLoader()); }; } diff --git a/lambdas/client-transform-filter-lambda/src/services/config-loader-service.ts b/lambdas/client-transform-filter-lambda/src/services/config-loader-service.ts index b0af71b0..b5542d01 100644 --- a/lambdas/client-transform-filter-lambda/src/services/config-loader-service.ts +++ b/lambdas/client-transform-filter-lambda/src/services/config-loader-service.ts @@ -1,5 +1,5 @@ import { S3Client } from "@aws-sdk/client-s3"; -import { ConfigCache } from "services/config-cache"; +import { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; import { ConfigLoader } from "services/config-loader"; const DEFAULT_CACHE_TTL_SECONDS = 60; @@ -26,52 +26,49 @@ export const createS3Client = ( }; export class ConfigLoaderService { - private readonly cache: ConfigCache; - private loader: ConfigLoader | undefined; + private cache: ConfigSubscriptionCache | undefined; + + private readonly ttlMs: number; + constructor(cacheTtlMs: number = resolveCacheTtlMs()) { - this.cache = new ConfigCache(cacheTtlMs); + this.ttlMs = cacheTtlMs; } getLoader(): ConfigLoader { - const bucketName = process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET; - if (!bucketName) { - throw new Error("CLIENT_SUBSCRIPTION_CONFIG_BUCKET is required"); - } - if (this.loader) { return this.loader; } - this.loader = new ConfigLoader({ - bucketName, - keyPrefix: - process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX ?? 
- "client_subscriptions/", - s3Client: createS3Client(), - cache: this.cache, - }); - + this.cache = this.createCache(createS3Client()); + this.loader = new ConfigLoader(this.cache); return this.loader; } reset(s3Client?: S3Client): void { + this.cache?.reset(); this.loader = undefined; - this.cache.clear(); + this.cache = undefined; if (s3Client) { - const bucketName = process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET; - if (!bucketName) { - throw new Error("CLIENT_SUBSCRIPTION_CONFIG_BUCKET is required"); - } - this.loader = new ConfigLoader({ - bucketName, - keyPrefix: - process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX ?? - "client_subscriptions/", - s3Client, - cache: this.cache, - }); + this.cache = this.createCache(s3Client); + this.loader = new ConfigLoader(this.cache); + } + } + + private createCache(s3Client: S3Client): ConfigSubscriptionCache { + const bucketName = process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET; + if (!bucketName) { + throw new Error("CLIENT_SUBSCRIPTION_CONFIG_BUCKET is required"); } + + return new ConfigSubscriptionCache({ + s3Client, + bucketName, + keyPrefix: + process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX ?? 
+ "client_subscriptions/", + ttlMs: this.ttlMs, + }); } } diff --git a/lambdas/client-transform-filter-lambda/src/services/config-loader.ts b/lambdas/client-transform-filter-lambda/src/services/config-loader.ts index 2d5b388f..0b272774 100644 --- a/lambdas/client-transform-filter-lambda/src/services/config-loader.ts +++ b/lambdas/client-transform-filter-lambda/src/services/config-loader.ts @@ -1,82 +1,21 @@ -import { GetObjectCommand, NoSuchKey, S3Client } from "@aws-sdk/client-s3"; +import type { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; import type { ClientSubscriptionConfiguration } from "@nhs-notify-client-callbacks/models"; -import { ConfigCache } from "services/config-cache"; import { logger } from "services/logger"; import { wrapUnknownError } from "services/error-handler"; -import { - ConfigValidationError, - validateClientConfig, -} from "services/validators/config-validator"; - -type ConfigLoaderOptions = { - bucketName: string; - keyPrefix: string; - s3Client: S3Client; - cache: ConfigCache; -}; - -function throwAsConfigError(error: unknown, clientId: string): never { - if (error instanceof ConfigValidationError) { - logger.error("Config validation failed with schema violations", { - clientId, - validationErrors: error.issues, - }); - throw error; - } - - const { message } = wrapUnknownError(error); - logger.error("Failed to load config from S3", { clientId }); - throw new ConfigValidationError([{ path: "config", message }]); -} +import { ConfigValidationError } from "services/validators/config-validator"; export class ConfigLoader { - constructor(private readonly options: ConfigLoaderOptions) {} + constructor(private readonly cache: ConfigSubscriptionCache) {} async loadClientConfig( clientId: string, ): Promise { - const cached = this.options.cache.get(clientId); - if (cached) { - logger.debug("Config loaded from cache", { clientId, cacheHit: true }); - return cached; - } - - logger.debug("Config not in 
cache, fetching from S3", { - clientId, - cacheHit: false, - }); - try { - const response = await this.options.s3Client.send( - new GetObjectCommand({ - Bucket: this.options.bucketName, - Key: `${this.options.keyPrefix}${clientId}.json`, - }), - ); - - if (!response.Body) { - throw new Error("S3 response body was empty"); - } - - const rawConfig = await response.Body.transformToString(); - const parsedConfig = JSON.parse(rawConfig) as unknown; - const validated = validateClientConfig(parsedConfig); - this.options.cache.set(clientId, validated); - logger.info("Config loaded successfully from S3", { - clientId, - subscriptionCount: validated.subscriptions.length, - }); - return validated; + return await this.cache.loadClientConfig(clientId); } catch (error) { - if (error instanceof NoSuchKey) { - logger.info( - "No config found in S3 for client - events will be filtered out", - { clientId }, - ); - return undefined; - } - throwAsConfigError(error, clientId); - return undefined; + const { message } = wrapUnknownError(error); + logger.error("Failed to load config", { clientId }); + throw new ConfigValidationError([{ path: "config", message }]); } } } diff --git a/lambdas/client-transform-filter-lambda/src/services/observability.ts b/lambdas/client-transform-filter-lambda/src/services/observability.ts index 4cfbf469..efd55eea 100644 --- a/lambdas/client-transform-filter-lambda/src/services/observability.ts +++ b/lambdas/client-transform-filter-lambda/src/services/observability.ts @@ -1,9 +1,6 @@ import type { MetricsLogger } from "aws-embedded-metrics"; import type { ClientCallbackPayload } from "@nhs-notify-client-callbacks/models"; -import { - logCallbackGenerated, - logCallbackSigned, -} from "services/callback-logger"; +import { logCallbackGenerated } from "services/callback-logger"; import type { Logger } from "services/logger"; import { logLifecycleEvent } from "services/logger"; import type { CallbackMetrics } from "services/metrics"; @@ -95,15 +92,6 @@ export 
class ObservabilityService { this.metrics.emitTransformationSuccess(); } - recordCallbackSigned( - payload: ClientCallbackPayload, - correlationId: string | undefined, - clientId: string, - signature: string, - ): void { - logCallbackSigned(this.logger, payload, correlationId, clientId, signature); - } - createChild(context: { correlationId?: string; eventType: string; diff --git a/lambdas/client-transform-filter-lambda/src/services/ssm-applications-map.ts b/lambdas/client-transform-filter-lambda/src/services/ssm-applications-map.ts deleted file mode 100644 index 87cead24..00000000 --- a/lambdas/client-transform-filter-lambda/src/services/ssm-applications-map.ts +++ /dev/null @@ -1,85 +0,0 @@ -import { GetParameterCommand, SSMClient } from "@aws-sdk/client-ssm"; -import { logger } from "services/logger"; - -const DEFAULT_CACHE_TTL_SECONDS = 60; - -export const createSsmClient = ( - env: NodeJS.ProcessEnv = process.env, -): SSMClient => { - const endpoint = env.AWS_ENDPOINT_URL; - return new SSMClient({ endpoint }); -}; - -export const resolveCacheTtlMs = ( - env: NodeJS.ProcessEnv = process.env, -): number => { - const ttlSeconds = Number.parseInt( - env.APPLICATIONS_MAP_CACHE_TTL_SECONDS ?? `${DEFAULT_CACHE_TTL_SECONDS}`, - 10, - ); - return ( - (Number.isFinite(ttlSeconds) ? 
ttlSeconds : DEFAULT_CACHE_TTL_SECONDS) * - 1000 - ); -}; - -export class ApplicationsMapService { - private cachedMap: Map | undefined; - - private cacheExpiresAt = 0; - - constructor( - private readonly ssmClient: SSMClient = createSsmClient(), - private readonly parameterName: string | undefined = process.env - .APPLICATIONS_MAP_PARAMETER, - private readonly cacheTtlMs: number = resolveCacheTtlMs(), - ) {} - - async getApplicationId(clientId: string): Promise { - const map = await this.getMap(); - return map.get(clientId); - } - - private async getMap(): Promise> { - if (!this.parameterName) { - throw new Error("APPLICATIONS_MAP_PARAMETER is required"); - } - const { parameterName } = this; - - if (this.cachedMap && Date.now() < this.cacheExpiresAt) { - logger.debug("Applications map loaded from cache"); - return this.cachedMap; - } - - const response = await this.ssmClient.send( - new GetParameterCommand({ - Name: parameterName, - WithDecryption: true, - }), - ); - - if (!response.Parameter?.Value) { - throw new Error( - `SSM parameter '${parameterName}' not found or has no value`, - ); - } - - let parsed: Record; - try { - parsed = JSON.parse(response.Parameter.Value) as Record; - } catch { - throw new Error(`SSM parameter '${parameterName}' contains invalid JSON`); - } - this.cachedMap = new Map(Object.entries(parsed)); - this.cacheExpiresAt = Date.now() + this.cacheTtlMs; - logger.info("Applications map loaded from SSM", { - parameterName, - }); - return this.cachedMap; - } - - reset(): void { - this.cachedMap = undefined; - this.cacheExpiresAt = 0; - } -} diff --git a/lambdas/https-client-lambda/jest.config.ts b/lambdas/https-client-lambda/jest.config.ts new file mode 100644 index 00000000..cd0ed08e --- /dev/null +++ b/lambdas/https-client-lambda/jest.config.ts @@ -0,0 +1,9 @@ +import { nodeJestConfig } from "../../jest.config.base.ts"; + +export default { + ...nodeJestConfig, + transform: { + ...nodeJestConfig.transform, + "\\.lua$": "/lua-transform.js", + 
}, +}; diff --git a/lambdas/https-client-lambda/lua-transform.js b/lambdas/https-client-lambda/lua-transform.js new file mode 100644 index 00000000..e6e0a1c9 --- /dev/null +++ b/lambdas/https-client-lambda/lua-transform.js @@ -0,0 +1,7 @@ +module.exports = { + process(sourceText) { + return { + code: `module.exports = ${JSON.stringify(sourceText)};`, + }; + }, +}; diff --git a/lambdas/https-client-lambda/package.json b/lambdas/https-client-lambda/package.json new file mode 100644 index 00000000..bc08ca4b --- /dev/null +++ b/lambdas/https-client-lambda/package.json @@ -0,0 +1,43 @@ +{ + "dependencies": { + "@aws-crypto/sha256-js": "catalog:aws", + "@aws-sdk/client-s3": "catalog:aws", + "@aws-sdk/client-secrets-manager": "catalog:aws", + "@aws-sdk/client-sqs": "catalog:aws", + "@aws-sdk/client-ssm": "catalog:aws", + "@aws-sdk/credential-providers": "catalog:aws", + "@smithy/signature-v4": "catalog:aws", + "@nhs-notify-client-callbacks/config-subscription-cache": "workspace:*", + "@nhs-notify-client-callbacks/logger": "workspace:*", + "@nhs-notify-client-callbacks/models": "workspace:*", + "@redis/client": "catalog:app", + "aws-embedded-metrics": "catalog:app", + "esbuild": "catalog:tools", + "node-forge": "catalog:app", + "p-map": "catalog:app" + }, + "devDependencies": { + "@tsconfig/node22": "catalog:tools", + "@types/aws-lambda": "catalog:tools", + "@types/jest": "catalog:test", + "@types/node": "catalog:tools", + "@types/node-forge": "catalog:tools", + "eslint": "catalog:lint", + "fengari": "^0.1.5", + "jest": "catalog:test", + "typescript": "catalog:tools" + }, + "engines": { + "node": ">=24.14.1" + }, + "name": "@nhs-notify-client-callbacks/https-client-lambda", + "private": true, + "scripts": { + "lambda-build": "rm -rf dist && pnpm exec esbuild --bundle --minify --sourcemap --target=es2020 --platform=node --loader:.node=file --loader:.lua=text --entry-names=[name] --outdir=dist src/index.ts", + "lint": "eslint .", + "lint:fix": "eslint . 
--fix", + "test:unit": "jest", + "typecheck": "tsc --noEmit" + }, + "version": "0.0.1" +} diff --git a/lambdas/https-client-lambda/src/__tests__/admit-lua.test.ts b/lambdas/https-client-lambda/src/__tests__/admit-lua.test.ts new file mode 100644 index 00000000..6aab4727 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/admit-lua.test.ts @@ -0,0 +1,460 @@ +import admitLuaSrc from "services/admit.lua"; +import { createRedisStore, evalLua } from "__tests__/helpers/lua-redis-mock"; + +// ARGV: [now, capacity, refillPerSec, cooldownMs, decayPeriodMs, cbWindowPeriodMs, cbProbeIntervalMs] +// KEYS: [cbKey, rlKey] +// Returns: [allowed (0|1), reason, retryAfterMs, effectiveRate] + +type AdmitArgs = { + now: number; + capacity: number; + refillPerSec: number; + cooldownMs: number; + decayPeriodMs: number; + cbWindowPeriodMs: number; + cbProbeIntervalMs: number; +}; + +const defaultArgs: AdmitArgs = { + now: 1_000_000, + capacity: 10, + refillPerSec: 10, + cooldownMs: 60_000, + decayPeriodMs: 300_000, + cbWindowPeriodMs: 60_000, + cbProbeIntervalMs: 60_000, +}; + +type AdmitResult = { + allowed: number; + reason: string; + retryAfterMs: number; + effectiveRate: number; +}; + +function runAdmit( + store: ReturnType, + args: Partial = {}, + targetId = "t1", +): AdmitResult { + const merged = { ...defaultArgs, ...args }; + const raw = evalLua( + admitLuaSrc, + [`cb:${targetId}`, `rl:${targetId}`], + [ + merged.now.toString(), + merged.capacity.toString(), + merged.refillPerSec.toString(), + merged.cooldownMs.toString(), + merged.decayPeriodMs.toString(), + merged.cbWindowPeriodMs.toString(), + merged.cbProbeIntervalMs.toString(), + ], + store, + ) as [number, string, number, number]; + return { + allowed: raw[0], + reason: raw[1], + retryAfterMs: raw[2], + effectiveRate: raw[3], + }; +} + +describe("admit.lua", () => { + describe("rate limiting", () => { + it("allows the first request with full token bucket", () => { + const store = createRedisStore(); + const { 
allowed, effectiveRate, reason, retryAfterMs } = runAdmit(store); + + expect(allowed).toBe(1); + expect(reason).toBe("allowed"); + expect(retryAfterMs).toBe(0); + expect(effectiveRate).toBe(10); + }); + + it("depletes tokens on consecutive calls and rejects when empty", () => { + const store = createRedisStore(); + + for (let i = 0; i < 10; i++) { + const { allowed } = runAdmit(store); + expect(allowed).toBe(1); + } + + const { allowed, reason } = runAdmit(store); + expect(allowed).toBe(0); + expect(reason).toBe("rate_limited"); + }); + + it("returns retryAfterMs when rate limited", () => { + const store = createRedisStore(); + + for (let i = 0; i < 10; i++) { + runAdmit(store); + } + + const { retryAfterMs } = runAdmit(store); + expect(retryAfterMs).toBe(1000); + }); + + it("reports effective rate when rate limited", () => { + const store = createRedisStore(); + + for (let i = 0; i < 10; i++) { + runAdmit(store); + } + + const { effectiveRate } = runAdmit(store); + expect(effectiveRate).toBe(10); + }); + + it("refills tokens over time", () => { + const store = createRedisStore(); + const now = 1_000_000; + + for (let i = 0; i < 10; i++) { + runAdmit(store, { now }); + } + + const denied = runAdmit(store, { now }); + expect(denied.allowed).toBe(0); + + const refilled = runAdmit(store, { now: now + 1000 }); + expect(refilled.allowed).toBe(1); + }); + + it("caps tokens at capacity", () => { + const store = createRedisStore(); + const now = 1_000_000; + + runAdmit(store, { now, capacity: 5, refillPerSec: 100 }); + + // Advance 10 seconds — would add 1000 tokens without cap + runAdmit(store, { now: now + 10_000, capacity: 5, refillPerSec: 100 }); + + const rlHash = store.get("rl:t1")!; + // Refill capped to capacity (5), then one consumed → 4 + expect(Number(rlHash.get("tokens"))).toBe(4); + }); + + it("handles zero refill rate", () => { + const store = createRedisStore(); + + for (let i = 0; i < 10; i++) { + runAdmit(store, { refillPerSec: 0 }); + } + + const { 
allowed, reason, retryAfterMs } = runAdmit(store, { + refillPerSec: 0, + }); + expect(allowed).toBe(0); + expect(reason).toBe("rate_limited"); + expect(retryAfterMs).toBe(1000); + }); + }); + + describe("circuit breaker", () => { + it("rejects when circuit is open", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 60_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", openedUntil.toString()], + ["last_probe_ms", now.toString()], + ]), + ); + + const { allowed, effectiveRate, reason } = runAdmit(store, { now }); + expect(allowed).toBe(0); + expect(reason).toBe("circuit_open"); + expect(effectiveRate).toBe(0); + }); + + it("returns retryAfterMs for open circuit", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 30_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", openedUntil.toString()], + ["last_probe_ms", now.toString()], + ]), + ); + + const { retryAfterMs } = runAdmit(store, { now }); + expect(retryAfterMs).toBe(30_000); + }); + + it("allows probe when probe interval has elapsed", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 120_000; + const lastProbe = now - 61_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", openedUntil.toString()], + ["last_probe_ms", lastProbe.toString()], + ]), + ); + + const { allowed, effectiveRate, reason, retryAfterMs } = runAdmit(store, { + now, + cbProbeIntervalMs: 60_000, + }); + expect(allowed).toBe(1); + expect(reason).toBe("probe"); + expect(retryAfterMs).toBe(0); + expect(effectiveRate).toBe(0); + }); + + it("updates last_probe_ms after allowing a probe", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 120_000; + const lastProbe = now - 61_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", openedUntil.toString()], + ["last_probe_ms", lastProbe.toString()], + ]), + ); + + 
runAdmit(store, { now, cbProbeIntervalMs: 60_000 }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("last_probe_ms")).toBe(now.toString()); + }); + + it("does not probe when interval has not elapsed", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 120_000; + const lastProbe = now - 30_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", openedUntil.toString()], + ["last_probe_ms", lastProbe.toString()], + ]), + ); + + const { allowed, reason } = runAdmit(store, { + now, + cbProbeIntervalMs: 60_000, + }); + expect(allowed).toBe(0); + expect(reason).toBe("circuit_open"); + }); + + it("does not probe when cbProbeIntervalMs is 0", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 120_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", openedUntil.toString()], + ["last_probe_ms", "0"], + ]), + ); + + const { allowed, reason } = runAdmit(store, { + now, + cbProbeIntervalMs: 0, + }); + expect(allowed).toBe(0); + expect(reason).toBe("circuit_open"); + }); + }); + + describe("sliding window", () => { + it("initialises cbWindowFrom on first call", () => { + const store = createRedisStore(); + const now = 1_000_000; + + runAdmit(store, { now }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("cb_window_from")).toBe(now.toString()); + }); + + it("rolls current window to previous when period expires", () => { + const store = createRedisStore(); + const cbWindowPeriodMs = 60_000; + const t0 = 1_000_000; + const t1 = t0 + cbWindowPeriodMs + 1; + + store.set( + "cb:t1", + new Map([ + ["cb_window_from", t0.toString()], + ["cb_failures", "5"], + ["cb_attempts", "10"], + ["cb_prev_failures", "0"], + ["cb_prev_attempts", "0"], + ]), + ); + + runAdmit(store, { now: t1, cbWindowPeriodMs }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("cb_prev_failures")).toBe("5"); + expect(cbHash.get("cb_prev_attempts")).toBe("10"); + 
expect(cbHash.get("cb_failures")).toBe("0"); + expect(cbHash.get("cb_attempts")).toBe("0"); + expect(cbHash.get("cb_window_from")).toBe(t1.toString()); + }); + + it("clears both windows when gap exceeds two periods", () => { + const store = createRedisStore(); + const cbWindowPeriodMs = 60_000; + const t0 = 1_000_000; + const t1 = t0 + 2 * cbWindowPeriodMs + 1; + + store.set( + "cb:t1", + new Map([ + ["cb_window_from", t0.toString()], + ["cb_failures", "5"], + ["cb_attempts", "10"], + ["cb_prev_failures", "3"], + ["cb_prev_attempts", "7"], + ]), + ); + + runAdmit(store, { now: t1, cbWindowPeriodMs }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("cb_prev_failures")).toBe("0"); + expect(cbHash.get("cb_prev_attempts")).toBe("0"); + expect(cbHash.get("cb_failures")).toBe("0"); + expect(cbHash.get("cb_attempts")).toBe("0"); + expect(cbHash.get("cb_window_from")).toBe(t1.toString()); + }); + }); + + describe("decay scaling", () => { + it("applies reduced rate during decay period", () => { + const store = createRedisStore(); + const closedAt = 1_000_000; + const decayPeriodMs = 300_000; + const halfwayThrough = closedAt + decayPeriodMs / 2; + + store.set("cb:t1", new Map([["opened_until_ms", closedAt.toString()]])); + + const { effectiveRate } = runAdmit(store, { + now: halfwayThrough, + refillPerSec: 10, + decayPeriodMs, + }); + expect(effectiveRate).toBe(5); + }); + + it("uses full rate after decay period ends", () => { + const store = createRedisStore(); + const closedAt = 1_000_000; + const decayPeriodMs = 300_000; + const afterDecay = closedAt + decayPeriodMs + 1; + + store.set("cb:t1", new Map([["opened_until_ms", closedAt.toString()]])); + + const { allowed, effectiveRate } = runAdmit(store, { + now: afterDecay, + refillPerSec: 10, + decayPeriodMs, + }); + expect(allowed).toBe(1); + expect(effectiveRate).toBe(10); + }); + + it("clamps minimum effective rate to 1", () => { + const store = createRedisStore(); + const closedAt = 1_000_000; + const 
decayPeriodMs = 300_000; + const veryEarly = closedAt + 1; + + store.set("cb:t1", new Map([["opened_until_ms", closedAt.toString()]])); + + const { effectiveRate } = runAdmit(store, { + now: veryEarly, + refillPerSec: 10, + decayPeriodMs, + }); + expect(effectiveRate).toBeGreaterThanOrEqual(1); + }); + + it("clears openedUntil when decay period fully elapses", () => { + const store = createRedisStore(); + const closedAt = 1_000_000; + const decayPeriodMs = 300_000; + const afterDecay = closedAt + decayPeriodMs + 1; + + store.set("cb:t1", new Map([["opened_until_ms", closedAt.toString()]])); + + runAdmit(store, { now: afterDecay, decayPeriodMs }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("opened_until_ms")).toBe("0"); + }); + + it("does not decay when decayPeriodMs is 0", () => { + const store = createRedisStore(); + const closedAt = 1_000_000; + + store.set("cb:t1", new Map([["opened_until_ms", closedAt.toString()]])); + + const { allowed, effectiveRate } = runAdmit(store, { + now: closedAt + 1, + refillPerSec: 10, + decayPeriodMs: 0, + }); + expect(allowed).toBe(1); + expect(effectiveRate).toBe(10); + }); + }); + + describe("state persistence", () => { + it("persists token count and last_refill_ms", () => { + const store = createRedisStore(); + runAdmit(store, { now: 1_000_000, capacity: 5 }); + + const rlHash = store.get("rl:t1")!; + expect(rlHash.get("tokens")).toBeDefined(); + expect(rlHash.get("last_refill_ms")).toBe("1000000"); + }); + + it("persists circuit breaker fields", () => { + const store = createRedisStore(); + runAdmit(store, { now: 1_000_000 }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.has("opened_until_ms")).toBe(true); + expect(cbHash.has("cb_window_from")).toBe(true); + expect(cbHash.has("cb_failures")).toBe(true); + expect(cbHash.has("cb_attempts")).toBe(true); + expect(cbHash.has("cb_prev_failures")).toBe(true); + expect(cbHash.has("cb_prev_attempts")).toBe(true); + }); + + it("isolates state between targets", 
() => { + const store = createRedisStore(); + runAdmit(store, {}, "target-a"); + runAdmit(store, {}, "target-b"); + + expect(store.has("cb:target-a")).toBe(true); + expect(store.has("cb:target-b")).toBe(true); + expect(store.has("rl:target-a")).toBe(true); + expect(store.has("rl:target-b")).toBe(true); + }); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/config-loader.test.ts b/lambdas/https-client-lambda/src/__tests__/config-loader.test.ts new file mode 100644 index 00000000..5c133ca6 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/config-loader.test.ts @@ -0,0 +1,201 @@ +import { GetObjectCommand } from "@aws-sdk/client-s3"; +import { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; + +import { loadTargetConfig, resetCache } from "services/config-loader"; + +const mockS3Send = jest.fn(); +jest.mock("@aws-sdk/client-s3", () => { + const actual = jest.requireActual("@aws-sdk/client-s3"); + return { + ...actual, + S3Client: jest.fn().mockImplementation(() => ({ + send: (...args: unknown[]) => mockS3Send(...args), + })), + }; +}); + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + }, +})); + +process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET = "test-bucket"; +process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX = "client_subscriptions/"; +process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS = "1"; + +const VALID_TARGET = { + targetId: "target-1", + type: "API" as const, + invocationEndpoint: "https://webhook.example.invalid", + invocationMethod: "POST" as const, + invocationRateLimit: 10, + apiKey: { headerName: "x-api-key", headerValue: "secret" }, +}; + +const VALID_CONFIG = { + clientId: "client-1", + subscriptions: [], + targets: [VALID_TARGET], +}; + +const makeS3Response = (body: unknown) => ({ + Body: { + transformToString: jest.fn().mockResolvedValue(JSON.stringify(body)), + }, +}); + 
+describe("loadTargetConfig", () => { + beforeEach(() => { + mockS3Send.mockReset(); + resetCache(); + }); + + it("parses valid S3 config and returns the matching target", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + const result = await loadTargetConfig("client-1", "target-1"); + + expect(result).toEqual(VALID_TARGET); + expect(mockS3Send).toHaveBeenCalledTimes(1); + expect(mockS3Send.mock.calls[0][0]).toBeInstanceOf(GetObjectCommand); + }); + + it("uses CLIENT_SUBSCRIPTION_CONFIG_PREFIX for the S3 key", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + await loadTargetConfig("client-1", "target-1"); + + const command: GetObjectCommand = mockS3Send.mock.calls[0][0]; + expect(command.input.Key).toBe("client_subscriptions/client-1.json"); + }); + + it("rejects config missing required field", async () => { + const invalidConfig = { + ...VALID_CONFIG, + targets: [ + { + type: VALID_TARGET.type, + invocationEndpoint: VALID_TARGET.invocationEndpoint, + invocationMethod: VALID_TARGET.invocationMethod, + invocationRateLimit: VALID_TARGET.invocationRateLimit, + apiKey: VALID_TARGET.apiKey, + }, + ], + }; + mockS3Send.mockResolvedValue(makeS3Response(invalidConfig)); + + await expect(loadTargetConfig("client-1", "target-1")).rejects.toThrow( + "Invalid client config for 'client-1'", + ); + }); + + it("returns cached value without S3 call on subsequent requests", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + await loadTargetConfig("client-1", "target-1"); + await loadTargetConfig("client-1", "target-1"); + + expect(mockS3Send).toHaveBeenCalledTimes(1); + }); + + it("re-fetches from S3 after TTL expiry", async () => { + jest.useFakeTimers(); + jest.setSystemTime(new Date("2026-01-01T10:00:00Z")); + + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + await loadTargetConfig("client-1", "target-1"); + + jest.advanceTimersByTime(1001); + + await 
loadTargetConfig("client-1", "target-1"); + + expect(mockS3Send).toHaveBeenCalledTimes(2); + + jest.useRealTimers(); + }); + + it("throws when CLIENT_SUBSCRIPTION_CONFIG_BUCKET is not set", async () => { + let loadFn: typeof loadTargetConfig; + const saved = process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET; + delete process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET; + + jest.isolateModules(() => { + // eslint-disable-next-line @typescript-eslint/no-require-imports -- jest.isolateModules requires synchronous require + loadFn = require("services/config-loader").loadTargetConfig; + }); + + await expect(loadFn!("client-1", "target-1")).rejects.toThrow( + "CLIENT_SUBSCRIPTION_CONFIG_BUCKET is required", + ); + + process.env.CLIENT_SUBSCRIPTION_CONFIG_BUCKET = saved; + }); + + it("throws when S3 response body is empty", async () => { + mockS3Send.mockResolvedValue({ Body: undefined }); + + await expect(loadTargetConfig("client-1", "target-1")).rejects.toThrow( + "S3 response body was empty for client 'client-1'", + ); + }); + + it("throws when client config is not found", async () => { + mockS3Send.mockResolvedValue(makeS3Response(null)); + + await expect( + loadTargetConfig("unknown-client", "target-1"), + ).rejects.toThrow("Invalid client config for 'unknown-client'"); + }); + + it("throws when target not found in config", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + await expect(loadTargetConfig("client-1", "nonexistent")).rejects.toThrow( + "Target 'nonexistent' not found in config for client 'client-1'", + ); + }); + + it("uses default prefix when CLIENT_SUBSCRIPTION_CONFIG_PREFIX is not set", async () => { + const saved = process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX; + delete process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX; + resetCache(); + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + await loadTargetConfig("client-1", "target-1"); + + const command: GetObjectCommand = mockS3Send.mock.calls[0][0]; + 
expect(command.input.Key).toBe("client_subscriptions/client-1.json"); + + process.env.CLIENT_SUBSCRIPTION_CONFIG_PREFIX = saved; + }); + + it("uses default TTL when CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS is not set", async () => { + const saved = process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS; + delete process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS; + resetCache(); + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + + const result = await loadTargetConfig("client-1", "target-1"); + + expect(result).toEqual(VALID_TARGET); + + process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS = saved; + }); + + it("throws when loadClientConfig resolves to undefined", async () => { + const spy = jest + .spyOn(ConfigSubscriptionCache.prototype, "loadClientConfig") + .mockResolvedValueOnce(undefined); + + await expect(loadTargetConfig("client-1", "target-1")).rejects.toThrow( + "No configuration found for client 'client-1'", + ); + + spy.mockRestore(); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/delivery-metrics.test.ts b/lambdas/https-client-lambda/src/__tests__/delivery-metrics.test.ts new file mode 100644 index 00000000..803c19bb --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/delivery-metrics.test.ts @@ -0,0 +1,252 @@ +const mockCreateMetricsLogger = jest.fn(); +jest.mock("aws-embedded-metrics", () => ({ + Unit: { Count: "Count", Milliseconds: "Milliseconds" }, + createMetricsLogger: () => mockCreateMetricsLogger(), +})); + +describe("delivery-metrics", () => { + const mockMetrics = { + setNamespace: jest.fn(), + setDimensions: jest.fn(), + setProperty: jest.fn(), + putMetric: jest.fn(), + flush: jest.fn().mockResolvedValue(undefined), + }; + + beforeEach(() => { + jest.resetModules(); + jest.clearAllMocks(); + mockCreateMetricsLogger.mockReturnValue(mockMetrics); + process.env.METRICS_NAMESPACE = "TestNamespace"; + process.env.ENVIRONMENT = "test"; + }); + + afterEach(() => { + delete process.env.METRICS_NAMESPACE; + delete 
process.env.ENVIRONMENT; + }); + + it("throws when METRICS_NAMESPACE is not set", async () => { + delete process.env.METRICS_NAMESPACE; + // @ts-expect-error -- modulePaths resolves at runtime + const { emitDeliveryAttempt } = await import("services/delivery-metrics"); + + expect(() => emitDeliveryAttempt("t-1")).toThrow( + "METRICS_NAMESPACE environment variable is not set", + ); + }); + + it("throws when ENVIRONMENT is not set", async () => { + delete process.env.ENVIRONMENT; + // @ts-expect-error -- modulePaths resolves at runtime + const { emitDeliveryAttempt } = await import("services/delivery-metrics"); + + expect(() => emitDeliveryAttempt("t-1")).toThrow( + "ENVIRONMENT environment variable is not set", + ); + }); + + it("creates metrics logger with correct namespace and dimensions", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const { emitDeliveryAttempt } = await import("services/delivery-metrics"); + + emitDeliveryAttempt("t-1"); + + expect(mockMetrics.setNamespace).toHaveBeenCalledWith("TestNamespace"); + expect(mockMetrics.setDimensions).toHaveBeenCalledWith({ + Environment: "test", + }); + }); + + it("caches the metrics logger on subsequent calls", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitDeliveryAttempt, emitDeliverySuccess } = mod; + + emitDeliveryAttempt("t-1"); + emitDeliverySuccess("t-1"); + + expect(mockCreateMetricsLogger).toHaveBeenCalledTimes(1); + }); + + it("emitDeliveryAttempt emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const { emitDeliveryAttempt } = await import("services/delivery-metrics"); + + emitDeliveryAttempt("target-42"); + + expect(mockMetrics.setProperty).toHaveBeenCalledWith( + "targetId", + "target-42", + ); + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliveryAttempt", + 1, + "Count", + ); + }); + + it("emitDeliverySuccess emits correct 
metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const { emitDeliverySuccess } = await import("services/delivery-metrics"); + + emitDeliverySuccess("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliverySuccess", + 1, + "Count", + ); + }); + + it("emitDeliveryFailure emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const { emitDeliveryFailure } = await import("services/delivery-metrics"); + + emitDeliveryFailure("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliveryFailure", + 1, + "Count", + ); + }); + + it("emitDeliveryPermanentFailure emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitDeliveryPermanentFailure } = mod; + + emitDeliveryPermanentFailure("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliveryPermanentFailure", + 1, + "Count", + ); + }); + + it("emitCircuitBreakerOpen emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitCircuitBreakerOpen } = mod; + + emitCircuitBreakerOpen("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "CircuitBreakerOpen", + 1, + "Count", + ); + }); + + it("emitRateLimited emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitRateLimited } = mod; + + emitRateLimited("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliveryRateLimited", + 1, + "Count", + ); + }); + + it("emitCircuitBreakerClosed emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitCircuitBreakerClosed } = mod; + + 
emitCircuitBreakerClosed("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "CircuitBreakerClosed", + 1, + "Count", + ); + }); + + it("emitRetryWindowExhausted emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitRetryWindowExhausted } = mod; + + emitRetryWindowExhausted("target-42"); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliveryRetryWindowExhausted", + 1, + "Count", + ); + }); + + it("emitAdmissionDenied emits correct metric with reason", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitAdmissionDenied } = mod; + + emitAdmissionDenied("target-42", "rate_limited"); + + expect(mockMetrics.setProperty).toHaveBeenCalledWith( + "targetId", + "target-42", + ); + expect(mockMetrics.setProperty).toHaveBeenCalledWith( + "reason", + "rate_limited", + ); + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "AdmissionDenied", + 1, + "Count", + ); + }); + + it("emitDeliveryDuration emits correct metric", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitDeliveryDuration } = mod; + + emitDeliveryDuration("target-42", 250); + + expect(mockMetrics.putMetric).toHaveBeenCalledWith( + "DeliveryDurationMs", + 250, + "Milliseconds", + ); + }); + + it("flushMetrics calls flush on the instance", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitDeliveryAttempt, flushMetrics } = mod; + + emitDeliveryAttempt("t-1"); + await flushMetrics(); + + expect(mockMetrics.flush).toHaveBeenCalled(); + }); + + it("flushMetrics does nothing when no metrics instance exists", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const { flushMetrics } = 
await import("services/delivery-metrics"); + + await flushMetrics(); + + expect(mockMetrics.flush).not.toHaveBeenCalled(); + }); + + it("resetMetrics clears the cached instance", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery-metrics"); + const { emitDeliveryAttempt, resetMetrics } = mod; + + emitDeliveryAttempt("t-1"); + resetMetrics(); + emitDeliveryAttempt("t-2"); + + expect(mockCreateMetricsLogger).toHaveBeenCalledTimes(2); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/delivery-observability.test.ts b/lambdas/https-client-lambda/src/__tests__/delivery-observability.test.ts new file mode 100644 index 00000000..25e164a9 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/delivery-observability.test.ts @@ -0,0 +1,225 @@ +import { + recordAdmissionDenied, + recordCircuitBreakerClosed, + recordCircuitBreakerOpen, + recordDeliveryAttempt, + recordDeliveryDuration, + recordDeliveryFailure, + recordDeliveryPermanentFailure, + recordDeliveryRateLimited, + recordDeliverySuccess, + recordRetryWindowExhausted, +} from "services/delivery-observability"; + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + }, +})); + +jest.mock("services/delivery-metrics", () => ({ + emitAdmissionDenied: jest.fn(), + emitCircuitBreakerClosed: jest.fn(), + emitCircuitBreakerOpen: jest.fn(), + emitDeliveryAttempt: jest.fn(), + emitDeliveryDuration: jest.fn(), + emitDeliveryFailure: jest.fn(), + emitDeliveryPermanentFailure: jest.fn(), + emitDeliverySuccess: jest.fn(), + emitRateLimited: jest.fn(), + emitRetryWindowExhausted: jest.fn(), +})); + +describe("delivery-observability", () => { + it("recordDeliveryAttempt emits metric and logs", () => { + const { emitDeliveryAttempt } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + 
recordDeliveryAttempt("client-1", "target-1", "msg-123"); + + expect(emitDeliveryAttempt).toHaveBeenCalledWith("target-1"); + expect(logger.info).toHaveBeenCalledWith( + "Attempting delivery", + expect.objectContaining({ + clientId: "client-1", + targetId: "target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordDeliverySuccess emits metric and logs", () => { + const { emitDeliverySuccess } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordDeliverySuccess("client-1", "target-1", "msg-123"); + + expect(emitDeliverySuccess).toHaveBeenCalledWith("target-1"); + expect(logger.info).toHaveBeenCalledWith( + "Delivery succeeded", + expect.objectContaining({ + clientId: "client-1", + targetId: "target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordDeliveryPermanentFailure emits metric and logs warning", () => { + const { emitDeliveryPermanentFailure } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordDeliveryPermanentFailure( + "client-1", + "target-1", + undefined, + undefined, + "msg-123", + ); + + expect(emitDeliveryPermanentFailure).toHaveBeenCalledWith("target-1"); + expect(logger.warn).toHaveBeenCalledWith( + "Permanent delivery failure \u2014 sending to DLQ", + expect.objectContaining({ + clientId: "client-1", + targetId: "target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordDeliveryRateLimited emits metric and logs", () => { + const { emitRateLimited } = jest.requireMock("services/delivery-metrics"); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordDeliveryRateLimited("client-1", "target-1", "msg-123"); + + expect(emitRateLimited).toHaveBeenCalledWith("target-1"); + expect(logger.info).toHaveBeenCalledWith( + "Rate limited (429)", + expect.objectContaining({ + clientId: "client-1", + targetId: 
"target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordDeliveryFailure emits metric and logs warning with context", () => { + const { emitDeliveryFailure } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordDeliveryFailure("client-1", "target-1", 503, 30, 3, "msg-123"); + + expect(emitDeliveryFailure).toHaveBeenCalledWith("target-1"); + expect(logger.warn).toHaveBeenCalledWith( + "Transient delivery failure \u2014 requeuing", + expect.objectContaining({ + clientId: "client-1", + targetId: "target-1", + correlationId: "msg-123", + statusCode: 503, + backoffSec: 30, + receiveCount: 3, + }), + ); + }); + + it("recordCircuitBreakerOpen emits metric and logs", () => { + const { emitCircuitBreakerOpen } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordCircuitBreakerOpen("target-1", "msg-123"); + + expect(emitCircuitBreakerOpen).toHaveBeenCalledWith("target-1"); + expect(logger.warn).toHaveBeenCalledWith( + "Circuit breaker opened", + expect.objectContaining({ + targetId: "target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordCircuitBreakerClosed emits metric and logs", () => { + const { emitCircuitBreakerClosed } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordCircuitBreakerClosed("target-1", "msg-123"); + + expect(emitCircuitBreakerClosed).toHaveBeenCalledWith("target-1"); + expect(logger.info).toHaveBeenCalledWith( + "Circuit breaker closed", + expect.objectContaining({ + targetId: "target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordRetryWindowExhausted emits metric and logs", () => { + const { emitRetryWindowExhausted } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = 
jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordRetryWindowExhausted("client-1", "target-1", "msg-123"); + + expect(emitRetryWindowExhausted).toHaveBeenCalledWith("target-1"); + expect(logger.warn).toHaveBeenCalledWith( + "Retry window exhausted \u2014 sending to DLQ", + expect.objectContaining({ + clientId: "client-1", + targetId: "target-1", + correlationId: "msg-123", + }), + ); + }); + + it("recordAdmissionDenied emits metric and logs", () => { + const { emitAdmissionDenied } = jest.requireMock( + "services/delivery-metrics", + ); + const { logger } = jest.requireMock("@nhs-notify-client-callbacks/logger"); + + recordAdmissionDenied("client-1", "target-1", "rate_limited", "msg-123"); + + expect(emitAdmissionDenied).toHaveBeenCalledWith( + "target-1", + "rate_limited", + ); + expect(logger.warn).toHaveBeenCalledWith( + "Admission denied", + expect.objectContaining({ + clientId: "client-1", + targetId: "target-1", + correlationId: "msg-123", + reason: "rate_limited", + }), + ); + }); + + it("recordDeliveryDuration emits metric", () => { + const { emitDeliveryDuration } = jest.requireMock( + "services/delivery-metrics", + ); + + recordDeliveryDuration("target-1", 250); + + expect(emitDeliveryDuration).toHaveBeenCalledWith("target-1", 250); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/dlq-sender.test.ts b/lambdas/https-client-lambda/src/__tests__/dlq-sender.test.ts new file mode 100644 index 00000000..692e41c9 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/dlq-sender.test.ts @@ -0,0 +1,134 @@ +import { SendMessageCommand } from "@aws-sdk/client-sqs"; + +import { sendToDlq } from "services/dlq-sender"; + +const mockSend = jest.fn(); +jest.mock("@aws-sdk/client-sqs", () => { + const actual = jest.requireActual("@aws-sdk/client-sqs"); + return { + ...actual, + SQSClient: jest.fn().mockImplementation(() => ({ + send: (...args: unknown[]) => mockSend(...args), + })), + }; +}); + +process.env.DLQ_URL = 
"https://sqs.eu-west-2.invalid/123456789/test-dlq"; + +describe("sendToDlq", () => { + beforeEach(() => { + mockSend.mockReset(); + }); + + it("sends SendMessageCommand with correct QueueUrl and MessageBody", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}'); + + expect(mockSend).toHaveBeenCalledTimes(1); + const command = mockSend.mock.calls[0][0]; + expect(command).toBeInstanceOf(SendMessageCommand); + expect(command.input).toEqual({ + QueueUrl: "https://sqs.eu-west-2.invalid/123456789/test-dlq", + MessageBody: '{"test":"message"}', + }); + }); + + it("surfaces SDK errors", async () => { + mockSend.mockRejectedValue(new Error("SQS send failed")); + + await expect(sendToDlq("body")).rejects.toThrow("SQS send failed"); + }); + + it("throws when DLQ_URL is not set", async () => { + let sendFn: typeof sendToDlq; + const saved = process.env.DLQ_URL; + delete process.env.DLQ_URL; + + jest.isolateModules(() => { + // eslint-disable-next-line @typescript-eslint/no-require-imports -- jest.isolateModules requires synchronous require + sendFn = require("services/dlq-sender").sendToDlq; + }); + + await expect(sendFn!("body")).rejects.toThrow("DLQ_URL is required"); + + process.env.DLQ_URL = saved; + }); + + it("includes ERROR_CODE and ERROR_MESSAGE for HTTP error with JSON body", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}', { + statusCode: 400, + responseBody: JSON.stringify({ message: "Bad request" }), + }); + + const command = mockSend.mock.calls[0][0]; + expect(command).toBeInstanceOf(SendMessageCommand); + expect(command.input.MessageAttributes).toEqual({ + ERROR_CODE: { DataType: "String", StringValue: "HTTP_CLIENT_ERROR" }, + ERROR_MESSAGE: { DataType: "String", StringValue: "Bad request" }, + }); + }); + + it("uses raw response body as ERROR_MESSAGE when not valid JSON", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}', { + statusCode: 400, + 
responseBody: "Bad request", + }); + + const command = mockSend.mock.calls[0][0]; + expect(command.input.MessageAttributes).toEqual({ + ERROR_CODE: { DataType: "String", StringValue: "HTTP_CLIENT_ERROR" }, + ERROR_MESSAGE: { DataType: "String", StringValue: "Bad request" }, + }); + }); + + it("uses errorCode as ERROR_CODE when provided", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}', { + errorCode: "CERT_HAS_EXPIRED", + }); + + const command = mockSend.mock.calls[0][0]; + expect(command.input.MessageAttributes).toEqual({ + ERROR_CODE: { DataType: "String", StringValue: "CERT_HAS_EXPIRED" }, + }); + }); + + it("sends empty MessageAttributes when errorInfo has no relevant fields", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}', {}); + + const command = mockSend.mock.calls[0][0]; + expect(command.input.MessageAttributes).toEqual({}); + }); + + it("sends no MessageAttributes when errorInfo is omitted", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}'); + + const command = mockSend.mock.calls[0][0]; + expect(command.input.MessageAttributes).toBeUndefined(); + }); + + it("uses JSON body message field when present in responseBody", async () => { + mockSend.mockResolvedValue({}); + + await sendToDlq('{"test":"message"}', { + statusCode: 422, + responseBody: JSON.stringify({ message: "Validation failed", code: 42 }), + }); + + const command = mockSend.mock.calls[0][0]; + expect(command.input.MessageAttributes?.ERROR_MESSAGE).toEqual({ + DataType: "String", + StringValue: "Validation failed", + }); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/endpoint-gate.test.ts b/lambdas/https-client-lambda/src/__tests__/endpoint-gate.test.ts new file mode 100644 index 00000000..efbc6d88 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/endpoint-gate.test.ts @@ -0,0 +1,228 @@ +import { + type EndpointGateConfig, + admit, + 
recordResult, + resetAdmitSha, +} from "services/endpoint-gate"; + +const mockSendCommand = jest.fn(); +const mockConnect = jest.fn().mockResolvedValue(undefined); +const mockDisconnect = jest.fn().mockResolvedValue(undefined); +const mockOn = jest.fn(); + +const defaultConfig: EndpointGateConfig = { + burstCapacity: 10, + cbProbeIntervalMs: 60_000, + decayPeriodMs: 300_000, + cbWindowPeriodMs: 60_000, + cbErrorThreshold: 0.5, + cbMinAttempts: 10, + cbCooldownMs: 60_000, +}; + +const mockRedis = { + sendCommand: mockSendCommand, + connect: mockConnect, + disconnect: mockDisconnect, + on: mockOn, + isOpen: true, +} as never; + +beforeEach(() => { + jest.clearAllMocks(); + resetAdmitSha(); +}); + +describe("admit", () => { + it("returns allowed when tokens available", async () => { + mockSendCommand.mockResolvedValueOnce([1, "allowed", 0, 10]); + + const result = await admit(mockRedis, "target-1", 10, true, defaultConfig); + + expect(result).toEqual({ allowed: true, probe: false, effectiveRate: 10 }); + expect(mockSendCommand).toHaveBeenCalledWith( + expect.arrayContaining(["EVALSHA"]), + ); + }); + + it("returns rate_limited when tokens exhausted", async () => { + mockSendCommand.mockResolvedValueOnce([0, "rate_limited", 1000, 10]); + + const result = await admit(mockRedis, "target-1", 10, false, defaultConfig); + + expect(result).toEqual({ + allowed: false, + reason: "rate_limited", + retryAfterMs: 1000, + effectiveRate: 10, + }); + }); + + it("returns allowed with probe flag when circuit is open but probe slot is available", async () => { + mockSendCommand.mockResolvedValueOnce([1, "probe", 0, 0]); + + const result = await admit(mockRedis, "target-1", 10, true, defaultConfig); + + expect(result).toEqual({ allowed: true, probe: true, effectiveRate: 0 }); + }); + + it("returns circuit_open without probe slot", async () => { + mockSendCommand.mockResolvedValueOnce([0, "circuit_open", 30_000, 0]); + + const result = await admit(mockRedis, "target-1", 10, true, 
defaultConfig); + + expect(result).toEqual({ + allowed: false, + reason: "circuit_open", + retryAfterMs: 30_000, + effectiveRate: 0, + }); + }); + + it("falls back to EVAL on NOSCRIPT error", async () => { + mockSendCommand + .mockRejectedValueOnce(new Error("NOSCRIPT No matching script")) + .mockResolvedValueOnce([1, "allowed", 0, 10]); + + const result = await admit(mockRedis, "target-1", 10, true, defaultConfig); + + expect(result).toEqual({ allowed: true, probe: false, effectiveRate: 10 }); + expect(mockSendCommand).toHaveBeenCalledTimes(2); + expect(mockSendCommand).toHaveBeenNthCalledWith( + 1, + expect.arrayContaining(["EVALSHA"]), + ); + expect(mockSendCommand).toHaveBeenNthCalledWith( + 2, + expect.arrayContaining(["EVAL"]), + ); + }); + + it("passes cbProbeIntervalMs=0 when circuit breaker is disabled", async () => { + mockSendCommand.mockResolvedValueOnce([1, "allowed", 0, 10]); + + await admit(mockRedis, "target-1", 10, false, defaultConfig); + + // EVALSHA layout: [EVALSHA, sha, keyCount, cbKey, rlKey, now, capacity, refillPerSec, cooldownMs, decayPeriodMs, cbWindowPeriodMs, cbProbeIntervalMs] + const args = mockSendCommand.mock.calls[0]![0] as string[]; + const cbProbeIntervalArg = args[11]; + expect(cbProbeIntervalArg).toBe("0"); + }); + + it("passes cbKey first, rlKey second", async () => { + mockSendCommand.mockResolvedValueOnce([1, "allowed", 0, 5]); + + await admit(mockRedis, "my-target", 5, true, defaultConfig); + + const args = mockSendCommand.mock.calls[0]![0] as string[]; + expect(args[3]).toBe("cb:{my-target}"); + expect(args[4]).toBe("rl:{my-target}"); + }); +}); + +describe("evalScript", () => { + it("throws a wrapped error including the original message when EVALSHA fails with a non-NOSCRIPT Error", async () => { + const redisError = new Error("WRONGTYPE Operation against a key"); + mockSendCommand.mockRejectedValueOnce(redisError); + + const thrown = await admit( + mockRedis, + "target-1", + 10, + true, + defaultConfig, + ).catch((error: 
unknown) => error); + + expect(thrown).toBeInstanceOf(Error); + expect((thrown as Error).message).toContain("Redis error in script"); + expect((thrown as Error).message).toContain( + "WRONGTYPE Operation against a key", + ); + expect((thrown as Error & { cause: unknown }).cause).toBe(redisError); + }); + + it("throws a wrapped error using String() when EVALSHA rejects with a non-Error value", async () => { + mockSendCommand.mockRejectedValueOnce("connection refused"); + + const thrown = await admit( + mockRedis, + "target-1", + 10, + true, + defaultConfig, + ).catch((error: unknown) => error); + + expect(thrown).toBeInstanceOf(Error); + expect((thrown as Error).message).toContain("Redis error in script"); + expect((thrown as Error).message).toContain("connection refused"); + }); +}); + +describe("recordResult", () => { + it("returns closed on success below threshold", async () => { + mockSendCommand.mockResolvedValueOnce([1, "closed"]); + + const result = await recordResult( + mockRedis, + "target-1", + true, + defaultConfig, + ); + + expect(result).toEqual({ ok: true, state: "closed" }); + expect(mockSendCommand).toHaveBeenCalledWith( + expect.arrayContaining(["EVALSHA"]), + ); + }); + + it("returns opened when failure crosses threshold", async () => { + mockSendCommand.mockResolvedValueOnce([0, "opened"]); + + const result = await recordResult( + mockRedis, + "target-1", + false, + defaultConfig, + ); + + expect(result).toEqual({ ok: false, state: "opened" }); + }); + + it("returns failed when failure is below threshold", async () => { + mockSendCommand.mockResolvedValueOnce([0, "failed"]); + + const result = await recordResult( + mockRedis, + "target-1", + false, + defaultConfig, + ); + + expect(result).toEqual({ ok: false, state: "failed" }); + }); + + it("falls back to EVAL on NOSCRIPT error", async () => { + mockSendCommand + .mockRejectedValueOnce(new Error("NOSCRIPT No matching script")) + .mockResolvedValueOnce([1, "closed"]); + + const result = await 
recordResult( + mockRedis, + "target-1", + true, + defaultConfig, + ); + + expect(result).toEqual({ ok: true, state: "closed" }); + expect(mockSendCommand).toHaveBeenCalledTimes(2); + }); + + it("passes correct cb key for target", async () => { + mockSendCommand.mockResolvedValueOnce([1, "closed"]); + + await recordResult(mockRedis, "my-target", true, defaultConfig); + + const args = mockSendCommand.mock.calls[0]![0] as string[]; + expect(args[3]).toBe("cb:{my-target}"); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/fixtures/handler-fixtures.ts b/lambdas/https-client-lambda/src/__tests__/fixtures/handler-fixtures.ts new file mode 100644 index 00000000..731d478a --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/fixtures/handler-fixtures.ts @@ -0,0 +1,45 @@ +import type { SQSRecord } from "aws-lambda"; + +export const DEFAULT_TARGET = { + targetId: "target-1", + type: "API" as const, + invocationEndpoint: "https://webhook.example.invalid", + invocationMethod: "POST" as const, + invocationRateLimit: 10, + apiKey: { headerName: "x-api-key", headerValue: "secret-key" }, + delivery: { + mtls: { enabled: true }, + }, +}; + +export const makeRecord = (overrides: Partial<SQSRecord> = {}): SQSRecord => ({ + messageId: "msg-1", + receiptHandle: "receipt-1", + body: JSON.stringify({ + payload: { + data: [ + { + type: "MessageStatus", + attributes: { + messageId: "test-message-id", + messageStatus: "delivered", + }, + }, + ], + }, + subscriptionId: "sub-1", + targetId: "target-1", + }), + attributes: { + ApproximateReceiveCount: "1", + SentTimestamp: "0", + SenderId: "sender", + ApproximateFirstReceiveTimestamp: "0", + }, + messageAttributes: {}, + md5OfBody: "abc", + eventSource: "aws:sqs", + eventSourceARN: "arn:aws:sqs:eu-west-2:123:queue", + awsRegion: "eu-west-2", + ...overrides, +}); diff --git a/lambdas/https-client-lambda/src/__tests__/handler.test.ts b/lambdas/https-client-lambda/src/__tests__/handler.test.ts new file mode 100644 index 
00000000..3b8ad521 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/handler.test.ts @@ -0,0 +1,561 @@ +import { processRecords } from "handler"; +import { + DEFAULT_TARGET, + makeRecord, +} from "__tests__/fixtures/handler-fixtures"; +import { VisibilityManagedError } from "services/visibility-managed-error"; + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + }, +})); + +const mockLoadTargetConfig = jest.fn(); +jest.mock("services/config-loader", () => ({ + loadTargetConfig: (...args: unknown[]) => mockLoadTargetConfig(...args), +})); + +const mockGetApplicationId = jest.fn(); +jest.mock("services/ssm-applications-map", () => ({ + getApplicationId: (...args: unknown[]) => mockGetApplicationId(...args), +})); + +const mockSignPayload = jest.fn(); +jest.mock("services/payload-signer", () => ({ + signPayload: (...args: unknown[]) => mockSignPayload(...args), +})); + +const mockBuildAgent = jest.fn(); +jest.mock("services/delivery/tls-agent-factory", () => ({ + buildAgent: (...args: unknown[]) => mockBuildAgent(...args), +})); + +const mockDeliverPayload = jest.fn(); +jest.mock("services/delivery/https-client", () => ({ + deliverPayload: (...args: unknown[]) => mockDeliverPayload(...args), + OUTCOME_SUCCESS: "success", + OUTCOME_PERMANENT_FAILURE: "permanent_failure", + OUTCOME_RATE_LIMITED: "rate_limited", + OUTCOME_TRANSIENT_FAILURE: "transient_failure", +})); + +const mockSendToDlq = jest.fn(); +jest.mock("services/dlq-sender", () => ({ + sendToDlq: (...args: unknown[]) => mockSendToDlq(...args), +})); + +const mockChangeVisibility = jest.fn(); +jest.mock("services/sqs-visibility", () => ({ + changeVisibility: (...args: unknown[]) => mockChangeVisibility(...args), +})); + +const mockJitteredBackoff = jest.fn(); +const mockIsWindowExhausted = jest.fn(); +const mockHandleRateLimitedRecord = jest.fn(); +jest.mock("services/delivery/retry-policy", () => ({ 
+ jitteredBackoffSeconds: (...args: unknown[]) => mockJitteredBackoff(...args), + isWindowExhausted: (...args: unknown[]) => mockIsWindowExhausted(...args), + handleRateLimitedRecord: (...args: unknown[]) => + mockHandleRateLimitedRecord(...args), +})); + +const mockAdmit = jest.fn(); +const mockGetRedisClient = jest.fn(); +const mockRecordResult = jest.fn(); +jest.mock("services/endpoint-gate", () => ({ + admit: (...args: unknown[]) => mockAdmit(...args), + recordResult: (...args: unknown[]) => mockRecordResult(...args), +})); +jest.mock("services/redis-client", () => ({ + getRedisClient: (...args: unknown[]) => mockGetRedisClient(...args), +})); + +jest.mock("services/delivery-metrics", () => ({ + emitAdmissionDenied: jest.fn(), + emitCircuitBreakerClosed: jest.fn(), + emitCircuitBreakerOpen: jest.fn(), + emitDeliveryAttempt: jest.fn(), + emitDeliveryDuration: jest.fn(), + emitDeliveryFailure: jest.fn(), + emitDeliveryPermanentFailure: jest.fn(), + emitDeliverySuccess: jest.fn(), + emitRateLimited: jest.fn(), + emitRetryWindowExhausted: jest.fn(), + flushMetrics: jest.fn().mockResolvedValue(undefined), + resetMetrics: jest.fn(), +})); + +process.env.CLIENT_ID = "client-1"; + +describe("processRecords", () => { + const mockAgent = {}; + + beforeEach(() => { + jest.clearAllMocks(); + mockLoadTargetConfig.mockResolvedValue(DEFAULT_TARGET); + mockGetApplicationId.mockResolvedValue("app-id-1"); + mockSignPayload.mockReturnValue("signature-abc"); + mockBuildAgent.mockResolvedValue(mockAgent); + mockDeliverPayload.mockResolvedValue({ outcome: "success" }); + mockSendToDlq.mockResolvedValue(undefined); + mockChangeVisibility.mockResolvedValue(undefined); + mockJitteredBackoff.mockReturnValue(5); + mockIsWindowExhausted.mockReturnValue(false); + mockHandleRateLimitedRecord.mockRejectedValue( + new VisibilityManagedError("Rate limited — requeue"), + ); + mockGetRedisClient.mockResolvedValue({}); + mockAdmit.mockResolvedValue({ + allowed: true, + probe: false, + 
effectiveRate: 10, + }); + mockRecordResult.mockResolvedValue({ ok: true, state: "closed" }); + }); + + it("returns no failures on successful delivery", async () => { + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockLoadTargetConfig).toHaveBeenCalledWith("client-1", "target-1"); + expect(mockGetApplicationId).toHaveBeenCalledWith("client-1"); + expect(mockSignPayload).toHaveBeenCalledWith( + "app-id-1", + "secret-key", + expect.objectContaining({ data: expect.any(Array) }), + ); + expect(mockBuildAgent).toHaveBeenCalledWith(DEFAULT_TARGET); + expect(mockDeliverPayload).toHaveBeenCalledWith( + DEFAULT_TARGET, + expect.any(String), + "signature-abc", + mockAgent, + ); + }); + + it("sends permanent failure to DLQ and returns no failure", async () => { + mockDeliverPayload.mockResolvedValue({ outcome: "permanent_failure" }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockSendToDlq).toHaveBeenCalledWith(makeRecord().body, { + outcome: "permanent_failure", + }); + }); + + it("returns failure for transient 5xx errors", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + }); + + it("returns failure for 429 rate-limited responses", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "rate_limited", + retryAfterHeader: "60", + }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockHandleRateLimitedRecord).toHaveBeenCalledWith( + makeRecord(), + "client-1", + "target-1", + "60", + 1, + ); + }); + + it("processes multiple records independently", async () => { + const record1 = makeRecord({ messageId: "msg-1" }); + const record2 = makeRecord({ messageId: "msg-2" }); + + mockDeliverPayload + 
.mockResolvedValueOnce({ outcome: "success" }) + .mockResolvedValueOnce({ + outcome: "transient_failure", + statusCode: 500, + }); + + const failures = await processRecords([record1, record2]); + + expect(failures).toEqual([{ itemIdentifier: "msg-2" }]); + }); + + it("an unexpected error on one record does not prevent subsequent records being processed", async () => { + const record1 = makeRecord({ messageId: "msg-1" }); + const record2 = makeRecord({ messageId: "msg-2" }); + + mockLoadTargetConfig + .mockRejectedValueOnce(new Error("S3 unavailable")) + .mockResolvedValueOnce(DEFAULT_TARGET); + + const failures = await processRecords([record1, record2]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockDeliverPayload).toHaveBeenCalledTimes(1); + expect(mockChangeVisibility).toHaveBeenCalledWith("receipt-1", 5); + }); + + it("applies jittered backoff cooldown on unexpected errors", async () => { + mockLoadTargetConfig.mockRejectedValue(new Error("Infrastructure error")); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockChangeVisibility).toHaveBeenCalledWith("receipt-1", 5); + }); + + it("does not apply a second visibility change for admission-denied (managed path)", async () => { + mockAdmit.mockResolvedValue({ + allowed: false, + reason: "rate_limited", + retryAfterMs: 2000, + effectiveRate: 10, + }); + + await processRecords([makeRecord()]); + + expect(mockChangeVisibility).toHaveBeenCalledTimes(1); + }); + + it("does not apply a second visibility change for transient failure (managed path)", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + + await processRecords([makeRecord()]); + + expect(mockChangeVisibility).toHaveBeenCalledTimes(1); + }); + + it("returns failure when CLIENT_ID is not set", async () => { + const saved = process.env.CLIENT_ID; + delete process.env.CLIENT_ID; + + const 
failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + + process.env.CLIENT_ID = saved; + }); + + it("sends to DLQ when retry window is exhausted", async () => { + mockIsWindowExhausted.mockReturnValue(true); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockSendToDlq).toHaveBeenCalledWith(makeRecord().body); + expect(mockDeliverPayload).not.toHaveBeenCalled(); + }); + + it("calls changeVisibility with backoff on 5xx then throws", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockChangeVisibility).toHaveBeenCalledWith("receipt-1", 5); + }); + + it("delegates 429 handling to handleRateLimitedRecord", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "rate_limited", + retryAfterHeader: "120", + }); + + await processRecords([makeRecord()]); + + expect(mockHandleRateLimitedRecord).toHaveBeenCalledWith( + makeRecord(), + "client-1", + "target-1", + "120", + 1, + ); + }); + + it("returns no failure when handleRateLimitedRecord resolves (e.g. 
DLQ path)", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "rate_limited", + retryAfterHeader: "99999", + }); + mockHandleRateLimitedRecord.mockResolvedValue(undefined); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + }); + + it("requeues when rate limited by endpoint gate", async () => { + mockAdmit.mockResolvedValue({ + allowed: false, + reason: "rate_limited", + retryAfterMs: 2000, + effectiveRate: 10, + }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockChangeVisibility).toHaveBeenCalledWith("receipt-1", 2); + expect(mockSendToDlq).not.toHaveBeenCalled(); + expect(mockDeliverPayload).not.toHaveBeenCalled(); + }); + + it("requeues when circuit is open", async () => { + mockAdmit.mockResolvedValue({ + allowed: false, + reason: "circuit_open", + retryAfterMs: 30_000, + effectiveRate: 0, + }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockChangeVisibility).toHaveBeenCalledWith("receipt-1", 30); + expect(mockSendToDlq).not.toHaveBeenCalled(); + expect(mockDeliverPayload).not.toHaveBeenCalled(); + }); + + it("proceeds to delivery when circuit breaker is disabled", async () => { + const targetNoCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: false } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetNoCb); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockAdmit).toHaveBeenCalledWith( + expect.anything(), + "target-1", + 10, + false, + expect.any(Object), + ); + expect(mockDeliverPayload).toHaveBeenCalled(); + }); + + it("calls recordResult(true) on successful delivery when CB enabled", async () => { + const targetCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: true } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetCb); + + 
const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockRecordResult).toHaveBeenCalledWith( + expect.anything(), + "target-1", + true, + expect.any(Object), + ); + }); + + it("calls recordResult(false) on 5xx before visibility change", async () => { + const targetCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: true } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetCb); + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([{ itemIdentifier: "msg-1" }]); + expect(mockRecordResult).toHaveBeenCalledWith( + expect.anything(), + "target-1", + false, + expect.any(Object), + ); + expect(mockChangeVisibility).toHaveBeenCalled(); + }); + + it("does not call recordResult on rate-limited path", async () => { + mockAdmit.mockResolvedValue({ + allowed: false, + reason: "rate_limited", + retryAfterMs: 2000, + effectiveRate: 10, + }); + + await processRecords([makeRecord()]); + + expect(mockRecordResult).not.toHaveBeenCalled(); + }); + + it("does not call recordResult on 429 path", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "rate_limited", + retryAfterHeader: "60", + }); + + await processRecords([makeRecord()]); + + expect(mockRecordResult).not.toHaveBeenCalled(); + }); + + it("does not call recordResult when CB is disabled on transient failure", async () => { + const targetNoCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: false } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetNoCb); + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + + await processRecords([makeRecord()]); + + expect(mockRecordResult).not.toHaveBeenCalled(); + expect(mockChangeVisibility).toHaveBeenCalled(); + }); + + it("does not call recordResult when CB is disabled on success", async () => { + const 
targetNoCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: false } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetNoCb); + + await processRecords([makeRecord()]); + + expect(mockRecordResult).not.toHaveBeenCalled(); + }); + + it("emits CircuitBreakerOpen metric when recordResult returns opened", async () => { + const targetCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: true } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetCb); + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + mockRecordResult.mockResolvedValue({ ok: false, state: "opened" }); + + const { emitCircuitBreakerOpen } = jest.requireMock( + "services/delivery-metrics", + ); + + await processRecords([makeRecord()]); + + expect(emitCircuitBreakerOpen).toHaveBeenCalledWith("target-1"); + }); + + it("does not emit CircuitBreakerOpen when recordResult returns failed", async () => { + const targetCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: true } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetCb); + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + mockRecordResult.mockResolvedValue({ ok: false, state: "failed" }); + + const { emitCircuitBreakerOpen } = jest.requireMock( + "services/delivery-metrics", + ); + + await processRecords([makeRecord()]); + + expect(emitCircuitBreakerOpen).not.toHaveBeenCalled(); + }); + + it("does not emit CircuitBreakerOpen when recordResult returns closed", async () => { + const targetCb = { + ...DEFAULT_TARGET, + delivery: { circuitBreaker: { enabled: true } }, + }; + mockLoadTargetConfig.mockResolvedValue(targetCb); + mockDeliverPayload.mockResolvedValue({ + outcome: "transient_failure", + statusCode: 503, + }); + mockRecordResult.mockResolvedValue({ ok: true, state: "closed" }); + + const { emitCircuitBreakerOpen } = jest.requireMock( + "services/delivery-metrics", + ); + + await 
processRecords([makeRecord()]); + + expect(emitCircuitBreakerOpen).not.toHaveBeenCalled(); + }); + + it("emits RateLimited metric on 429 response", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "rate_limited", + retryAfterHeader: "60", + }); + + const { emitRateLimited } = jest.requireMock("services/delivery-metrics"); + + await processRecords([makeRecord()]); + + expect(emitRateLimited).toHaveBeenCalledWith("target-1"); + }); + + it("uses configured maxRetryDurationSeconds when set on target", async () => { + const targetWithRetry = { + ...DEFAULT_TARGET, + delivery: { ...DEFAULT_TARGET.delivery, maxRetryDurationSeconds: 3600 }, + }; + mockLoadTargetConfig.mockResolvedValue(targetWithRetry); + mockIsWindowExhausted.mockReturnValue(false); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockIsWindowExhausted).toHaveBeenCalledWith( + expect.any(Number), + 3_600_000, + ); + }); + + it("returns no failure when handleRateLimitedRecord resolves without throwing", async () => { + mockDeliverPayload.mockResolvedValue({ + outcome: "permanent_failure", + statusCode: 429, + retryAfterHeader: "60", + }); + mockHandleRateLimitedRecord.mockResolvedValueOnce(undefined); + + const failures = await processRecords([makeRecord()]); + + expect(failures).toEqual([]); + expect(mockIsWindowExhausted).toHaveBeenCalledWith( + expect.any(Number), + 7_200_000, + ); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/helpers/fengari.d.ts b/lambdas/https-client-lambda/src/__tests__/helpers/fengari.d.ts new file mode 100644 index 00000000..e40c1e59 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/helpers/fengari.d.ts @@ -0,0 +1,32 @@ +declare module "fengari" { + type LuaState = object; + + const lua: { + LUA_OK: number; + lua_close(L: LuaState): void; + lua_createtable(L: LuaState, narr: number, nrec: number): void; + lua_getglobal(L: LuaState, name: Uint8Array): number; + lua_gettop(L: 
LuaState): number; + lua_pushboolean(L: LuaState, b: number): void; + lua_pushcfunction(L: LuaState, fn: (L: LuaState) => number): void; + lua_pushinteger(L: LuaState, n: number): void; + lua_pushstring(L: LuaState, s: Uint8Array): void; + lua_rawseti(L: LuaState, idx: number, n: number): void; + lua_setglobal(L: LuaState, name: Uint8Array): void; + lua_tostring(L: LuaState, idx: number): Uint8Array; + }; + + const lauxlib: { + luaL_dostring(L: LuaState, s: Uint8Array): number; + luaL_newstate(): LuaState; + }; + + const lualib: { + luaL_openlibs(L: LuaState): void; + }; + + // eslint-disable-next-line @typescript-eslint/naming-convention -- fengari uses snake_case names + function to_jsstring(s: Uint8Array): string; + // eslint-disable-next-line @typescript-eslint/naming-convention -- fengari uses snake_case names + function to_luastring(s: string): Uint8Array; +} diff --git a/lambdas/https-client-lambda/src/__tests__/helpers/lua-redis-mock.ts b/lambdas/https-client-lambda/src/__tests__/helpers/lua-redis-mock.ts new file mode 100644 index 00000000..f6d11d50 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/helpers/lua-redis-mock.ts @@ -0,0 +1,164 @@ +import { + lauxlib, + lua, + lualib, + to_jsstring as toJsstring, + to_luastring as toLuastring, +} from "fengari"; + +type LuaState = ReturnType<typeof lauxlib.luaL_newstate>; +type RedisStore = Map<string, Map<string, string>>; + +export function createRedisStore(): RedisStore { + return new Map(); +} + +function hset(store: RedisStore, key: string, pairs: string[]): number { + const hash = store.get(key) ?? 
new Map(); + store.set(key, hash); + let fieldsSet = 0; + for (let i = 0; i < pairs.length; i += 2) { + // eslint-disable-next-line security/detect-object-injection -- pairs is a controlled array from redis HSET parsing + hash.set(pairs[i], pairs[i + 1]); + fieldsSet += 1; + } + return fieldsSet; +} + +function redisCallHandler(L: LuaState, store: RedisStore): number { + const cmd = toJsstring(lua.lua_tostring(L, 1)).toUpperCase(); + + if (cmd === "HMGET") { + const key = toJsstring(lua.lua_tostring(L, 2)); + const nArgs = lua.lua_gettop(L); + const hash = store.get(key); + lua.lua_createtable(L, nArgs - 2, 0); + for (let i = 3; i <= nArgs; i++) { + const field = toJsstring(lua.lua_tostring(L, i)); + const val = hash?.get(field); + if (val === undefined) { + lua.lua_pushboolean(L, 0); + } else { + lua.lua_pushstring(L, toLuastring(val)); + } + lua.lua_rawseti(L, -2, i - 2); + } + return 1; + } + + if (cmd === "HSET") { + const key = toJsstring(lua.lua_tostring(L, 2)); + const nArgs = lua.lua_gettop(L); + const pairs: string[] = []; + for (let i = 3; i <= nArgs; i++) { + pairs.push(toJsstring(lua.lua_tostring(L, i))); + } + const count = hset(store, key, pairs); + lua.lua_pushinteger(L, count); + return 1; + } + + if (cmd === "EXPIRE") { + lua.lua_pushinteger(L, 1); + return 1; + } + + throw new Error(`Unsupported Redis command in mock: ${cmd}`); +} + +const CJSON_AND_REDIS_PREAMBLE = ` + cjson = {} + function cjson.encode(t) + if t == nil then return "null" end + if type(t) ~= "table" then + if type(t) == "string" then return '"' .. t .. 
'"' end + if type(t) == "boolean" then return t and "true" or "false" end + if type(t) == "number" then + if t == math.floor(t) and t < 1e15 and t > -1e15 then + return string.format("%d", t) + end + return tostring(t) + end + return tostring(t) + end + local n = #t + local isArray = n > 0 + if isArray then + for k in pairs(t) do + if type(k) ~= "number" or k ~= math.floor(k) or k < 1 or k > n then + isArray = false + break + end + end + end + if isArray then + local parts = {} + for i = 1, n do + parts[#parts + 1] = cjson.encode(t[i]) + end + return "[" .. table.concat(parts, ",") .. "]" + end + local parts = {} + for k, v in pairs(t) do + parts[#parts + 1] = '"' .. tostring(k) .. '":' .. cjson.encode(v) + end + return "{" .. table.concat(parts, ",") .. "}" + end + + redis = {} + function redis.call(cmd, ...) + return __redis_call(cmd, ...) + end +`; + +function registerRedisCallGlobal(L: LuaState, store: RedisStore): void { + lua.lua_pushcfunction(L, (ls: LuaState) => redisCallHandler(ls, store)); + lua.lua_setglobal(L, toLuastring("__redis_call")); +} + +function installCjsonAndRedisShims(L: LuaState): void { + lauxlib.luaL_dostring(L, toLuastring(CJSON_AND_REDIS_PREAMBLE)); +} + +function setStringArrayGlobal( + L: LuaState, + name: string, + values: string[], +): void { + lua.lua_createtable(L, values.length, 0); + for (const [i, value] of values.entries()) { + lua.lua_pushstring(L, toLuastring(value)); + lua.lua_rawseti(L, -2, i + 1); + } + lua.lua_setglobal(L, toLuastring(name)); +} + +function runScript(L: LuaState, script: string): string { + const wrapped = `local __r = (function()\n${script}\nend)()\nreturn cjson.encode(__r)`; + const status = lauxlib.luaL_dostring(L, toLuastring(wrapped)); + if (status !== lua.LUA_OK) { + const errMsg = toJsstring(lua.lua_tostring(L, -1)); + throw new Error(`Lua error: ${errMsg}`); + } + return toJsstring(lua.lua_tostring(L, -1)); +} + +export function evalLua( + script: string, + keys: string[], + argv: string[], + 
store: RedisStore, +): unknown { + const L: LuaState = lauxlib.luaL_newstate(); + lualib.luaL_openlibs(L); + + try { + registerRedisCallGlobal(L, store); + installCjsonAndRedisShims(L); + setStringArrayGlobal(L, "KEYS", keys); + setStringArrayGlobal(L, "ARGV", argv); + return JSON.parse(runScript(L, script)); + } finally { + lua.lua_close(L); + } +} diff --git a/lambdas/https-client-lambda/src/__tests__/https-client.test.ts b/lambdas/https-client-lambda/src/__tests__/https-client.test.ts new file mode 100644 index 00000000..a6229c57 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/https-client.test.ts @@ -0,0 +1,348 @@ +/* eslint-disable unicorn/prefer-event-target -- Node.js http module mock requires EventEmitter API */ +import { EventEmitter } from "node:events"; +import https, { Agent } from "node:https"; +import type { CallbackTarget } from "@nhs-notify-client-callbacks/models"; + +import { deliverPayload } from "services/delivery/https-client"; + +jest.mock("services/delivery/tls-agent-factory", () => ({ + PERMANENT_TLS_ERROR_CODES: new Set([ + "CERT_HAS_EXPIRED", + "DEPTH_ZERO_SELF_SIGNED_CERT", + "ERR_CERT_PINNING_FAILED", + "ERR_TLS_CERT_ALTNAME_INVALID", + "SELF_SIGNED_CERT_IN_CHAIN", + "UNABLE_TO_VERIFY_LEAF_SIGNATURE", + ]), +})); + +const createTarget = (): CallbackTarget => ({ + targetId: "target-1", + type: "API", + invocationEndpoint: "https://webhook.example.invalid:8443/callback", + invocationMethod: "POST", + invocationRateLimit: 10, + apiKey: { headerName: "x-api-key", headerValue: "secret" }, +}); + +const createMockAgent = () => ({}) as Agent; + +type MockResponse = EventEmitter & { + statusCode: number; + headers: Record<string, string>; + resume: jest.Mock; +}; + +function mockHttpsRequest( + statusCode: number, + headers: Record<string, string> = {}, + body = "", +) { + const mockReq = new EventEmitter() as EventEmitter & { + end: jest.Mock; + destroy: jest.Mock; + }; + mockReq.end = jest.fn(); + mockReq.destroy = jest.fn(); + + jest.spyOn(https, 
"request").mockImplementation((...args: unknown[]) => { + const callback = args.find((a) => typeof a === "function") as + | ((res: MockResponse) => void) + | undefined; + + const res: MockResponse = Object.assign(new EventEmitter(), { + statusCode, + headers, + resume: jest.fn(), + }); + + if (callback) { + process.nextTick(() => { + callback(res); + process.nextTick(() => { + if (body) res.emit("data", Buffer.from(body)); + res.emit("end"); + }); + }); + } + + return mockReq as unknown as ReturnType; + }); + + return mockReq; +} + +function mockHttpsRequestError(errorCode: string) { + const mockReq = new EventEmitter() as EventEmitter & { + end: jest.Mock; + destroy: jest.Mock; + }; + mockReq.end = jest.fn(); + mockReq.destroy = jest.fn(); + + jest.spyOn(https, "request").mockImplementation(() => { + process.nextTick(() => { + const error = new Error("TLS error") as NodeJS.ErrnoException; + error.code = errorCode; + mockReq.emit("error", error); + }); + + return mockReq as unknown as ReturnType; + }); + + return mockReq; +} + +function mockHttpsRequestTimeout() { + const mockReq = new EventEmitter() as EventEmitter & { + end: jest.Mock; + destroy: jest.Mock; + }; + mockReq.end = jest.fn(); + mockReq.destroy = jest.fn((error?: Error) => { + if (error) { + process.nextTick(() => mockReq.emit("error", error)); + } + }); + + jest.spyOn(https, "request").mockImplementation(() => { + process.nextTick(() => mockReq.emit("timeout")); + return mockReq as unknown as ReturnType; + }); + + return mockReq; +} + +describe("deliverPayload", () => { + afterEach(() => { + jest.restoreAllMocks(); + }); + + it("returns success on 2xx", async () => { + mockHttpsRequest(200); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "success" }); + }); + + it("returns permanent_failure on 4xx non-429", async () => { + mockHttpsRequest(400, {}, JSON.stringify({ message: "Bad request" })); + 
+ const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ + outcome: "permanent_failure", + statusCode: 400, + responseBody: JSON.stringify({ message: "Bad request" }), + }); + }); + + it("returns permanent_failure on TLS error CERT_HAS_EXPIRED", async () => { + mockHttpsRequestError("CERT_HAS_EXPIRED"); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ + outcome: "permanent_failure", + errorCode: "CERT_HAS_EXPIRED", + }); + }); + + it("returns permanent_failure on TLS pinning error", async () => { + mockHttpsRequestError("ERR_CERT_PINNING_FAILED"); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ + outcome: "permanent_failure", + errorCode: "ERR_CERT_PINNING_FAILED", + }); + }); + + it("returns transient_failure on 5xx", async () => { + mockHttpsRequest(503); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "transient_failure", statusCode: 503 }); + }); + + it("returns rate_limited with Retry-After header value", async () => { + mockHttpsRequest(429, { "retry-after": "60" }); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ + outcome: "rate_limited", + retryAfterHeader: "60", + statusCode: 429, + }); + }); + + it("returns rate_limited with undefined retryAfterHeader when header is absent", async () => { + mockHttpsRequest(429); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ + outcome: "rate_limited", + retryAfterHeader: undefined, + statusCode: 429, + }); + }); + + it("returns transient_failure 
on TCP error", async () => { + mockHttpsRequestError("ECONNREFUSED"); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "transient_failure", statusCode: 0 }); + }); + + it("uses port 443 when URL has no explicit port", async () => { + mockHttpsRequest(200); + const target = createTarget(); + target.invocationEndpoint = "https://webhook.example.invalid/callback"; + + const result = await deliverPayload( + target, + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "success" }); + const callUrl = (https.request as jest.Mock).mock.calls[0][0] as URL; + expect(callUrl).toBeInstanceOf(URL); + expect(callUrl.port).toBe(""); + }); + + it("returns transient failure on request timeout", async () => { + mockHttpsRequestTimeout(); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "transient_failure", statusCode: 0 }); + }); + + it("treats undefined statusCode as transient failure with code 0", async () => { + const mockReq = new EventEmitter() as EventEmitter & { + end: jest.Mock; + destroy: jest.Mock; + }; + mockReq.end = jest.fn(); + mockReq.destroy = jest.fn(); + + jest.spyOn(https, "request").mockImplementation((...args: unknown[]) => { + const callback = args.find((a) => typeof a === "function") as + | ((res: MockResponse) => void) + | undefined; + + const res: MockResponse = Object.assign(new EventEmitter(), { + statusCode: undefined as unknown as number, + headers: {}, + resume: jest.fn(), + }); + + if (callback) { + process.nextTick(() => callback(res)); + } + + return mockReq as unknown as ReturnType; + }); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "transient_failure", statusCode: 0 }); + }); + + it("treats 
undefined statusCode as 0", async () => { + const mockReq = new EventEmitter() as EventEmitter & { + end: jest.Mock; + destroy: jest.Mock; + }; + mockReq.end = jest.fn(); + mockReq.destroy = jest.fn(); + + jest.spyOn(https, "request").mockImplementation((...args: unknown[]) => { + const callback = args.find((a) => typeof a === "function") as + | ((res: MockResponse) => void) + | undefined; + + const res = Object.assign(new EventEmitter(), { + statusCode: undefined as unknown as number, + headers: {}, + resume: jest.fn(), + }) as MockResponse; + + if (callback) { + process.nextTick(() => { + callback(res); + process.nextTick(() => (res as EventEmitter).emit("end")); + }); + } + + return mockReq as unknown as ReturnType; + }); + + const result = await deliverPayload( + createTarget(), + '{"test":true}', + "sig-abc", + createMockAgent(), + ); + + expect(result).toEqual({ outcome: "transient_failure", statusCode: 0 }); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/index.test.ts b/lambdas/https-client-lambda/src/__tests__/index.test.ts new file mode 100644 index 00000000..53394149 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/index.test.ts @@ -0,0 +1,36 @@ +import { handler } from "index"; +import { processRecords } from "handler"; + +jest.mock("handler", () => ({ + processRecords: jest.fn().mockResolvedValue([]), +})); + +describe("handler", () => { + it("returns batchItemFailures from processRecords", async () => { + const event = { + Records: [ + { + messageId: "msg-1", + receiptHandle: "r-1", + body: "{}", + attributes: { + ApproximateReceiveCount: "1", + SentTimestamp: "0", + SenderId: "sender", + ApproximateFirstReceiveTimestamp: "0", + }, + messageAttributes: {}, + md5OfBody: "abc", + eventSource: "aws:sqs", + eventSourceARN: "arn:aws:sqs:eu-west-2:123:queue", + awsRegion: "eu-west-2", + }, + ], + }; + + const result = await handler(event); + + expect(result).toEqual({ batchItemFailures: [] }); + 
expect(processRecords).toHaveBeenCalledWith(event.Records);
+  });
+});
diff --git a/lambdas/https-client-lambda/src/__tests__/payload-signer.test.ts b/lambdas/https-client-lambda/src/__tests__/payload-signer.test.ts
new file mode 100644
index 00000000..191f85dc
--- /dev/null
+++ b/lambdas/https-client-lambda/src/__tests__/payload-signer.test.ts
@@ -0,0 +1,52 @@
+import { createHmac } from "node:crypto";
+import { signPayload } from "services/payload-signer";
+
+const makePayload = () =>
+  ({
+    data: [
+      { type: "MessageStatus", attributes: { messageStatus: "delivered" } },
+    ],
+  }) as Parameters<typeof signPayload>[2];
+
+describe("signPayload", () => {
+  it("produces correct HMAC-SHA256 output for a known input", () => {
+    const payload = makePayload();
+    // eslint-disable-next-line sonarjs/hardcoded-secret-signatures -- test fixture, not a real secret
+    const expected = createHmac("sha256", "app-1.key-1")
+      .update(JSON.stringify(payload))
+      .digest("hex");
+
+    expect(signPayload("app-1", "key-1", payload)).toBe(expected);
+  });
+
+  it("produces different signatures for different appId/apiKey combinations", () => {
+    const payload = makePayload();
+
+    const sig1 = signPayload("app-1", "key-1", payload);
+    const sig2 = signPayload("app-2", "key-2", payload);
+
+    expect(sig1).not.toBe(sig2);
+  });
+
+  it("produces the same signature for the same inputs", () => {
+    const payload = makePayload();
+
+    const sig1 = signPayload("app-1", "key-1", payload);
+    const sig2 = signPayload("app-1", "key-1", payload);
+
+    expect(sig1).toBe(sig2);
+  });
+
+  it("produces a deterministic non-empty signature for an empty payload object", () => {
+    const emptyPayload = {} as Parameters<typeof signPayload>[2];
+
+    const sig = signPayload("app-1", "key-1", emptyPayload);
+
+    expect(sig).toBeTruthy();
+    expect(typeof sig).toBe("string");
+    expect(sig.length).toBeGreaterThan(0);
+
+    const sig2 = signPayload("app-1", "key-1", emptyPayload);
+    expect(sig).toBe(sig2);
+  });
+});
diff --git 
a/lambdas/https-client-lambda/src/__tests__/record-result-lua.test.ts b/lambdas/https-client-lambda/src/__tests__/record-result-lua.test.ts
new file mode 100644
index 00000000..515f1377
--- /dev/null
+++ b/lambdas/https-client-lambda/src/__tests__/record-result-lua.test.ts
@@ -0,0 +1,373 @@
+import recordResultLuaSrc from "services/record-result.lua";
+import { createRedisStore, evalLua } from "__tests__/helpers/lua-redis-mock";
+
+// ARGV: [now, success, cooldownMs, decayPeriodMs, cbErrorThreshold, cbMinAttempts, cbWindowPeriodMs]
+// KEYS: [cbKey]
+// Returns: [ok (0|1), state] state: "closed" | "opened" | "failed"
+
+type RecordResultArgs = {
+  now: number;
+  success: boolean;
+  cooldownMs: number;
+  decayPeriodMs: number;
+  cbErrorThreshold: number;
+  cbMinAttempts: number;
+  cbWindowPeriodMs: number;
+};
+
+const defaultArgs: RecordResultArgs = {
+  now: 1_000_000,
+  success: true,
+  cooldownMs: 60_000,
+  decayPeriodMs: 300_000,
+  cbErrorThreshold: 0.5,
+  cbMinAttempts: 10,
+  cbWindowPeriodMs: 60_000,
+};
+
+type RecordResultResult = [number, string];
+
+function runRecordResult(
+  store: ReturnType<typeof createRedisStore>,
+  args: Partial<RecordResultArgs> = {},
+  targetId = "t1",
+): RecordResultResult {
+  const merged = { ...defaultArgs, ...args };
+  return evalLua(
+    recordResultLuaSrc,
+    [`cb:${targetId}`],
+    [
+      merged.now.toString(),
+      merged.success ? 
"1" : "0", + merged.cooldownMs.toString(), + merged.decayPeriodMs.toString(), + merged.cbErrorThreshold.toString(), + merged.cbMinAttempts.toString(), + merged.cbWindowPeriodMs.toString(), + ], + store, + ) as RecordResultResult; +} + +describe("record-result.lua", () => { + describe("success recording", () => { + it("returns closed state for a successful result", () => { + const store = createRedisStore(); + const [ok, state] = runRecordResult(store, { success: true }); + + expect(ok).toBe(1); + expect(state).toBe("closed"); + }); + + it("increments attempt count without incrementing failures", () => { + const store = createRedisStore(); + runRecordResult(store, { success: true }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("cb_attempts")).toBe("1"); + expect(cbHash.get("cb_failures")).toBe("0"); + }); + }); + + describe("failure recording", () => { + it("increments both attempts and failures on error", () => { + const store = createRedisStore(); + runRecordResult(store, { success: false }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("cb_attempts")).toBe("1"); + expect(cbHash.get("cb_failures")).toBe("1"); + }); + + it("returns failed state for a single failure below threshold", () => { + const store = createRedisStore(); + const [ok, state] = runRecordResult(store, { success: false }); + + expect(ok).toBe(0); + expect(state).toBe("failed"); + }); + + it("stays closed when below error threshold", () => { + const store = createRedisStore(); + const now = 1_000_000; + + for (let i = 0; i < 8; i++) { + runRecordResult(store, { now, success: true }); + } + for (let i = 0; i < 2; i++) { + runRecordResult(store, { now, success: false }); + } + + const [ok, state] = runRecordResult(store, { now, success: true }); + expect(ok).toBe(1); + expect(state).toBe("closed"); + }); + }); + + describe("circuit opening", () => { + it("opens circuit when error rate exceeds threshold", () => { + const store = createRedisStore(); + const now = 
1_000_000; + + for (let i = 0; i < 4; i++) { + const [, state] = runRecordResult(store, { + now, + success: false, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + expect(state).toBe("failed"); + } + + const [ok, state] = runRecordResult(store, { + now, + success: false, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + expect(ok).toBe(0); + expect(state).toBe("opened"); + }); + + it("does not open circuit when below minimum attempts", () => { + const store = createRedisStore(); + const now = 1_000_000; + + for (let i = 0; i < 4; i++) { + runRecordResult(store, { + now, + success: false, + cbMinAttempts: 10, + }); + } + + const [ok, state] = runRecordResult(store, { + now, + success: false, + cbMinAttempts: 10, + }); + expect(ok).toBe(0); + expect(state).toBe("failed"); + }); + + it("sets opened_until_ms with cooldown on open", () => { + const store = createRedisStore(); + const now = 1_000_000; + const cooldownMs = 30_000; + + for (let i = 0; i < 5; i++) { + runRecordResult(store, { + now, + success: false, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + cooldownMs, + }); + } + + const cbHash = store.get("cb:t1")!; + expect(Number(cbHash.get("opened_until_ms"))).toBe(now + cooldownMs); + }); + + it("resets all counters on open", () => { + const store = createRedisStore(); + const now = 1_000_000; + + for (let i = 0; i < 5; i++) { + runRecordResult(store, { + now, + success: false, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + } + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("cb_failures")).toBe("0"); + expect(cbHash.get("cb_attempts")).toBe("0"); + expect(cbHash.get("cb_window_from")).toBe("0"); + expect(cbHash.get("cb_prev_failures")).toBe("0"); + expect(cbHash.get("cb_prev_attempts")).toBe("0"); + }); + + it("does not double-trip when circuit is already open", () => { + const store = createRedisStore(); + const now = 1_000_000; + const openedUntil = now + 60_000; + + store.set( + "cb:t1", + new Map([ + ["opened_until_ms", 
openedUntil.toString()], + ["cb_window_from", now.toString()], + ]), + ); + + for (let i = 0; i < 20; i++) { + const [, state] = runRecordResult(store, { + now, + success: false, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + expect(state).toBe("failed"); + } + + const cbHash = store.get("cb:t1")!; + expect(Number(cbHash.get("opened_until_ms"))).toBe(openedUntil); + }); + }); + + describe("two-window blended rate", () => { + it("blends previous window failures into current assessment", () => { + const store = createRedisStore(); + const now = 1_000_000; + const cbWindowPeriodMs = 60_000; + + store.set( + "cb:t1", + new Map([ + ["cb_window_from", now.toString()], + ["cb_prev_failures", "8"], + ["cb_prev_attempts", "10"], + ]), + ); + + const [ok, state] = runRecordResult(store, { + now, + success: false, + cbWindowPeriodMs, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + expect(ok).toBe(0); + expect(state).toBe("opened"); + }); + + it("reduces previous window weight as current window ages", () => { + const store = createRedisStore(); + const cbWindowPeriodMs = 100_000; + const t0 = 1_000_000; + const nearEnd = t0 + cbWindowPeriodMs - 1; + + store.set( + "cb:t1", + new Map([ + ["cb_window_from", t0.toString()], + ["cb_prev_failures", "10"], + ["cb_prev_attempts", "10"], + ]), + ); + + for (let i = 0; i < 20; i++) { + runRecordResult(store, { + now: nearEnd, + success: true, + cbWindowPeriodMs, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + } + + const [, state] = runRecordResult(store, { + now: nearEnd, + success: false, + cbWindowPeriodMs, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + expect(state).toBe("failed"); + }); + + it("ignores previous window when cbWindowPeriodMs is 0", () => { + const store = createRedisStore(); + const now = 1_000_000; + + store.set( + "cb:t1", + new Map([ + ["cb_window_from", now.toString()], + ["cb_prev_failures", "100"], + ["cb_prev_attempts", "100"], + ]), + ); + + const [, state] = runRecordResult(store, { + 
now, + success: false, + cbWindowPeriodMs: 0, + cbMinAttempts: 5, + cbErrorThreshold: 0.5, + }); + expect(state).toBe("failed"); + }); + }); + + describe("decay period", () => { + it("preserves opened_until_ms during active decay", () => { + const store = createRedisStore(); + const openedUntil = 1_060_000; + const duringDecay = openedUntil + 100_000; + + store.set( + "cb:t1", + new Map([["opened_until_ms", openedUntil.toString()]]), + ); + + runRecordResult(store, { + now: duringDecay, + success: true, + decayPeriodMs: 300_000, + }); + + const cbHash = store.get("cb:t1")!; + expect(Number(cbHash.get("opened_until_ms"))).toBe(openedUntil); + }); + + it("clears opened_until_ms after decay period elapses", () => { + const store = createRedisStore(); + const openedUntil = 1_060_000; + const decayPeriodMs = 300_000; + const afterDecay = openedUntil + decayPeriodMs + 1; + + store.set( + "cb:t1", + new Map([["opened_until_ms", openedUntil.toString()]]), + ); + + runRecordResult(store, { + now: afterDecay, + success: true, + decayPeriodMs, + }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("opened_until_ms")).toBe("0"); + }); + + it("clears opened_until_ms when circuit was never opened", () => { + const store = createRedisStore(); + const now = 1_000_000; + + runRecordResult(store, { now, success: true }); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.get("opened_until_ms")).toBe("0"); + }); + }); + + describe("state persistence", () => { + it("writes all counter fields to redis", () => { + const store = createRedisStore(); + runRecordResult(store); + + const cbHash = store.get("cb:t1")!; + expect(cbHash.has("opened_until_ms")).toBe(true); + expect(cbHash.has("cb_window_from")).toBe(true); + expect(cbHash.has("cb_failures")).toBe(true); + expect(cbHash.has("cb_attempts")).toBe(true); + expect(cbHash.has("cb_prev_failures")).toBe(true); + expect(cbHash.has("cb_prev_attempts")).toBe(true); + }); + }); +}); diff --git 
a/lambdas/https-client-lambda/src/__tests__/redis-client.test.ts b/lambdas/https-client-lambda/src/__tests__/redis-client.test.ts new file mode 100644 index 00000000..3cd9513f --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/redis-client.test.ts @@ -0,0 +1,123 @@ +import { getRedisClient, resetRedisClient } from "services/redis-client"; + +jest.mock("@nhs-notify-client-callbacks/logger"); + +const mockPresign = jest.fn().mockResolvedValue({ + hostname: "cache.example.invalid", + path: "/", + query: { "X-Amz-Signature": "mock-sig" }, +}); + +jest.mock("@smithy/signature-v4", () => ({ + SignatureV4: jest.fn().mockImplementation(() => ({ presign: mockPresign })), +})); + +jest.mock("@aws-sdk/credential-providers", () => ({ + fromNodeProviderChain: jest.fn(), +})); + +const mockSendCommand = jest.fn(); +const mockConnect = jest.fn().mockResolvedValue(undefined); +const mockDisconnect = jest.fn().mockResolvedValue(undefined); +const mockOn = jest.fn(); + +jest.mock("@redis/client", () => ({ + createClient: jest.fn(() => ({ + sendCommand: mockSendCommand, + connect: mockConnect, + disconnect: mockDisconnect, + on: mockOn, + isOpen: true, + })), +})); + +beforeEach(() => { + jest.clearAllMocks(); + resetRedisClient(); + delete process.env.ELASTICACHE_ENDPOINT; + delete process.env.ELASTICACHE_CACHE_NAME; + delete process.env.ELASTICACHE_IAM_USERNAME; +}); + +describe("getRedisClient", () => { + it("throws when ELASTICACHE_ENDPOINT is not set", async () => { + await expect(getRedisClient()).rejects.toThrow( + "ELASTICACHE_ENDPOINT is required", + ); + }); + + it("throws when ELASTICACHE_IAM_USERNAME is not set", async () => { + process.env.ELASTICACHE_ENDPOINT = "cache.example.invalid"; + + await expect(getRedisClient()).rejects.toThrow( + "ELASTICACHE_IAM_USERNAME is required", + ); + }); + + it("throws when ELASTICACHE_CACHE_NAME is not set", async () => { + process.env.ELASTICACHE_ENDPOINT = "cache.example.invalid"; + process.env.ELASTICACHE_IAM_USERNAME = 
"iam-user"; + + await expect(getRedisClient()).rejects.toThrow( + "ELASTICACHE_CACHE_NAME, ELASTICACHE_ENDPOINT, and ELASTICACHE_IAM_USERNAME are required", + ); + }); + + it("creates and connects a Redis client with IAM token", async () => { + process.env.ELASTICACHE_ENDPOINT = "cache.example.invalid"; + process.env.ELASTICACHE_CACHE_NAME = "my-cache"; + process.env.ELASTICACHE_IAM_USERNAME = "iam-user"; + + const client = await getRedisClient(); + + expect(client).toBeDefined(); + expect(mockPresign).toHaveBeenCalled(); + expect(mockConnect).toHaveBeenCalled(); + }); + + it("returns cached client when already open and token is valid", async () => { + process.env.ELASTICACHE_ENDPOINT = "cache.example.invalid"; + process.env.ELASTICACHE_CACHE_NAME = "my-cache"; + process.env.ELASTICACHE_IAM_USERNAME = "iam-user"; + + const first = await getRedisClient(); + const second = await getRedisClient(); + + expect(first).toBe(second); + expect(mockConnect).toHaveBeenCalledTimes(1); + expect(mockPresign).toHaveBeenCalledTimes(1); + }); + + it("registers error handler on client", async () => { + process.env.ELASTICACHE_ENDPOINT = "cache.example.invalid"; + process.env.ELASTICACHE_CACHE_NAME = "my-cache"; + process.env.ELASTICACHE_IAM_USERNAME = "iam-user"; + + await getRedisClient(); + + expect(mockOn).toHaveBeenCalledWith("error", expect.any(Function)); + + const errorHandler = mockOn.mock.calls.find( + (c: unknown[]) => c[0] === "error", + )![1] as (err: Error) => void; + errorHandler(new Error("test error")); + }); + + it("disconnects existing client when token expires before reconnecting", async () => { + jest.useFakeTimers(); + process.env.ELASTICACHE_ENDPOINT = "cache.example.invalid"; + process.env.ELASTICACHE_CACHE_NAME = "my-cache"; + process.env.ELASTICACHE_IAM_USERNAME = "iam-user"; + + await getRedisClient(); + + jest.advanceTimersByTime(841_000); + + await getRedisClient(); + + expect(mockDisconnect).toHaveBeenCalledTimes(1); + 
expect(mockConnect).toHaveBeenCalledTimes(2); + + jest.useRealTimers(); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/retry-policy.test.ts b/lambdas/https-client-lambda/src/__tests__/retry-policy.test.ts new file mode 100644 index 00000000..de828762 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/retry-policy.test.ts @@ -0,0 +1,176 @@ +import type { SQSRecord } from "aws-lambda"; +import { + exceedsSqsMaxVisibility, + handleRateLimitedRecord, + isWindowExhausted, + jitteredBackoffSeconds, + parseRetryAfter, +} from "services/delivery/retry-policy"; +import { VisibilityManagedError } from "services/visibility-managed-error"; + +const mockSendToDlq = jest.fn(); +jest.mock("services/dlq-sender", () => ({ + sendToDlq: (...args: unknown[]) => mockSendToDlq(...args), +})); + +const mockChangeVisibility = jest.fn(); +jest.mock("services/sqs-visibility", () => ({ + changeVisibility: (...args: unknown[]) => mockChangeVisibility(...args), +})); + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { info: jest.fn(), warn: jest.fn(), error: jest.fn() }, +})); + +describe("jitteredBackoffSeconds", () => { + it("produces value in [1, 5) at receiveCount=1", () => { + for (let i = 0; i < 100; i++) { + const val = jitteredBackoffSeconds(1); + expect(val).toBeGreaterThanOrEqual(1); + expect(val).toBeLessThan(5); + } + }); + + it("produces value in [1, 300) at receiveCount=10 (cap)", () => { + for (let i = 0; i < 100; i++) { + const val = jitteredBackoffSeconds(10); + expect(val).toBeGreaterThanOrEqual(1); + expect(val).toBeLessThan(300); + } + }); + + it("respects cap at very high receiveCount", () => { + for (let i = 0; i < 50; i++) { + const val = jitteredBackoffSeconds(100); + expect(val).toBeLessThan(300); + } + }); +}); + +describe("parseRetryAfter", () => { + it("parses integer string", () => { + expect(parseRetryAfter("120")).toBe(120); + }); + + it("returns 0 for negative values", () => { + 
expect(parseRetryAfter("-5")).toBe(0); + }); + + it("parses HTTP date string", () => { + const futureDate = new Date(Date.now() + 60_000); + const result = parseRetryAfter(futureDate.toUTCString()); + expect(result).toBeGreaterThanOrEqual(58); + expect(result).toBeLessThanOrEqual(61); + }); + + it("returns 0 for past HTTP date", () => { + const pastDate = new Date(Date.now() - 60_000); + expect(parseRetryAfter(pastDate.toUTCString())).toBe(0); + }); + + it("returns 0 for garbage input", () => { + expect(parseRetryAfter("not-a-date-or-number")).toBe(0); + }); +}); + +describe("isWindowExhausted", () => { + it("returns false just below limit", () => { + const firstReceived = Date.now() - 999; + expect(isWindowExhausted(firstReceived, 1000)).toBe(false); + }); + + it("returns true at limit", () => { + const firstReceived = Date.now() - 1000; + expect(isWindowExhausted(firstReceived, 1000)).toBe(true); + }); + + it("returns true beyond limit", () => { + const firstReceived = Date.now() - 2000; + expect(isWindowExhausted(firstReceived, 1000)).toBe(true); + }); +}); + +describe("exceedsSqsMaxVisibility", () => { + it("returns false at 43200", () => { + expect(exceedsSqsMaxVisibility(43_200)).toBe(false); + }); + + it("returns true at 43201", () => { + expect(exceedsSqsMaxVisibility(43_201)).toBe(true); + }); +}); + +const makeRecord = (overrides: Partial = {}): SQSRecord => ({ + messageId: "msg-1", + receiptHandle: "receipt-1", + body: JSON.stringify({ + payload: {}, + subscriptionId: "sub-1", + targetId: "target-1", + }), + attributes: { + ApproximateReceiveCount: "1", + SentTimestamp: "0", + SenderId: "sender", + ApproximateFirstReceiveTimestamp: "0", + }, + messageAttributes: {}, + md5OfBody: "abc", + eventSource: "aws:sqs", + eventSourceARN: "arn:aws:sqs:eu-west-2:123:queue", + awsRegion: "eu-west-2", + ...overrides, +}); + +describe("handleRateLimitedRecord", () => { + beforeEach(() => { + jest.clearAllMocks(); + mockSendToDlq.mockResolvedValue(undefined); + 
mockChangeVisibility.mockResolvedValue(undefined); + }); + + it("sends to DLQ and returns when Retry-After exceeds SQS max visibility", async () => { + await handleRateLimitedRecord( + makeRecord(), + "client-1", + "target-1", + "50000", + 1, + ); + + expect(mockSendToDlq).toHaveBeenCalledWith(makeRecord().body); + expect(mockChangeVisibility).not.toHaveBeenCalled(); + }); + + it("uses Retry-After value for changeVisibility when within SQS max", async () => { + await expect( + handleRateLimitedRecord(makeRecord(), "client-1", "target-1", "120", 1), + ).rejects.toThrow("Rate limited — requeue"); + + expect(mockChangeVisibility).toHaveBeenCalledWith("receipt-1", 120); + expect(mockSendToDlq).not.toHaveBeenCalled(); + }); + + it("uses jittered backoff when no Retry-After header provided", async () => { + await expect( + handleRateLimitedRecord( + makeRecord(), + "client-1", + "target-1", + undefined, + 1, + ), + ).rejects.toThrow("Rate limited — requeue"); + + expect(mockChangeVisibility).toHaveBeenCalled(); + const [, delaySec] = mockChangeVisibility.mock.calls[0] as [string, number]; + expect(delaySec).toBeGreaterThanOrEqual(0); + expect(delaySec).toBeLessThan(5); + }); + + it("throws after requeuing so SQS marks the record as failed", async () => { + await expect( + handleRateLimitedRecord(makeRecord(), "client-1", "target-1", "30", 1), + ).rejects.toThrow(VisibilityManagedError); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/sqs-visibility.test.ts b/lambdas/https-client-lambda/src/__tests__/sqs-visibility.test.ts new file mode 100644 index 00000000..9e0d9e54 --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/sqs-visibility.test.ts @@ -0,0 +1,71 @@ +import { ChangeMessageVisibilityCommand } from "@aws-sdk/client-sqs"; + +import { changeVisibility } from "services/sqs-visibility"; + +const mockSend = jest.fn(); +jest.mock("@aws-sdk/client-sqs", () => { + const actual = jest.requireActual("@aws-sdk/client-sqs"); + return { + ...actual, 
+ SQSClient: jest.fn().mockImplementation(() => ({ + send: (...args: unknown[]) => mockSend(...args), + })), + }; +}); + +process.env.QUEUE_URL = "https://sqs.eu-west-2.invalid/123456789/test-queue"; + +describe("changeVisibility", () => { + beforeEach(() => { + mockSend.mockReset(); + }); + + it("sends ChangeMessageVisibilityCommand with correct params", async () => { + mockSend.mockResolvedValue({}); + + await changeVisibility("receipt-handle-1", 30); + + expect(mockSend).toHaveBeenCalledTimes(1); + const command = mockSend.mock.calls[0][0]; + expect(command).toBeInstanceOf(ChangeMessageVisibilityCommand); + expect(command.input).toEqual({ + QueueUrl: "https://sqs.eu-west-2.invalid/123456789/test-queue", + ReceiptHandle: "receipt-handle-1", + VisibilityTimeout: 30, + }); + }); + + it("floors fractional visibility timeout", async () => { + mockSend.mockResolvedValue({}); + + await changeVisibility("receipt-handle-1", 30.7); + + const command = mockSend.mock.calls[0][0]; + expect(command.input.VisibilityTimeout).toBe(30); + }); + + it("surfaces SDK errors", async () => { + mockSend.mockRejectedValue(new Error("SQS error")); + + await expect(changeVisibility("receipt-handle-1", 30)).rejects.toThrow( + "SQS error", + ); + }); + + it("throws when QUEUE_URL is not set", async () => { + let changeFn: typeof changeVisibility; + const saved = process.env.QUEUE_URL; + delete process.env.QUEUE_URL; + + jest.isolateModules(() => { + // eslint-disable-next-line @typescript-eslint/no-require-imports -- jest.isolateModules requires synchronous require + changeFn = require("services/sqs-visibility").changeVisibility; + }); + + await expect(changeFn!("receipt-handle-1", 30)).rejects.toThrow( + "QUEUE_URL is required", + ); + + process.env.QUEUE_URL = saved; + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/ssm-applications-map.test.ts b/lambdas/https-client-lambda/src/__tests__/ssm-applications-map.test.ts new file mode 100644 index 00000000..059023d1 --- 
/dev/null +++ b/lambdas/https-client-lambda/src/__tests__/ssm-applications-map.test.ts @@ -0,0 +1,117 @@ +import { GetParameterCommand } from "@aws-sdk/client-ssm"; + +import { getApplicationId, resetCache } from "services/ssm-applications-map"; + +const mockSend = jest.fn(); +jest.mock("@aws-sdk/client-ssm", () => { + const actual = jest.requireActual("@aws-sdk/client-ssm"); + return { + ...actual, + SSMClient: jest.fn().mockImplementation(() => ({ + send: (...args: unknown[]) => mockSend(...args), + })), + }; +}); + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + }, +})); + +process.env.APPLICATIONS_MAP_PARAMETER = "/test/applications-map"; + +describe("getApplicationId", () => { + beforeEach(() => { + mockSend.mockReset(); + resetCache(); + }); + + it("returns correct applicationId for a known clientId", async () => { + mockSend.mockResolvedValue({ + Parameter: { + Value: JSON.stringify({ + "client-1": "app-id-1", + "client-2": "app-id-2", + }), + }, + }); + + const result = await getApplicationId("client-1"); + + expect(result).toBe("app-id-1"); + expect(mockSend).toHaveBeenCalledTimes(1); + expect(mockSend.mock.calls[0][0]).toBeInstanceOf(GetParameterCommand); + }); + + it("throws for unknown clientId", async () => { + mockSend.mockResolvedValue({ + Parameter: { + Value: JSON.stringify({ "client-1": "app-id-1" }), + }, + }); + + await expect(getApplicationId("unknown")).rejects.toThrow( + "No applicationId found for clientId 'unknown' in SSM map", + ); + }); + + it("surfaces SSM SDK errors", async () => { + mockSend.mockRejectedValue(new Error("SSM unavailable")); + + await expect(getApplicationId("client-1")).rejects.toThrow( + "SSM unavailable", + ); + }); + + it("throws when APPLICATIONS_MAP_PARAMETER is not set", async () => { + let getFn: typeof getApplicationId; + const saved = process.env.APPLICATIONS_MAP_PARAMETER; + delete 
process.env.APPLICATIONS_MAP_PARAMETER; + + jest.isolateModules(() => { + // eslint-disable-next-line @typescript-eslint/no-require-imports -- jest.isolateModules requires synchronous require + getFn = require("services/ssm-applications-map").getApplicationId; + }); + + await expect(getFn!("client-1")).rejects.toThrow( + "APPLICATIONS_MAP_PARAMETER is required", + ); + + process.env.APPLICATIONS_MAP_PARAMETER = saved; + }); + + it("throws when SSM parameter value is empty", async () => { + mockSend.mockResolvedValue({ Parameter: { Value: undefined } }); + + await expect(getApplicationId("client-1")).rejects.toThrow( + "not found or has no value", + ); + }); + + it("throws when SSM parameter contains invalid JSON", async () => { + mockSend.mockResolvedValue({ + Parameter: { Value: "not-json" }, + }); + + await expect(getApplicationId("client-1")).rejects.toThrow( + "contains invalid JSON", + ); + }); + + it("caches the applications map between calls", async () => { + mockSend.mockResolvedValue({ + Parameter: { + Value: JSON.stringify({ "client-1": "app-id-1" }), + }, + }); + + await getApplicationId("client-1"); + await getApplicationId("client-1"); + + expect(mockSend).toHaveBeenCalledTimes(1); + }); +}); diff --git a/lambdas/https-client-lambda/src/__tests__/tls-agent-factory.test.ts b/lambdas/https-client-lambda/src/__tests__/tls-agent-factory.test.ts new file mode 100644 index 00000000..fae8112f --- /dev/null +++ b/lambdas/https-client-lambda/src/__tests__/tls-agent-factory.test.ts @@ -0,0 +1,441 @@ +import type { CallbackTarget } from "@nhs-notify-client-callbacks/models"; + +const mockS3Send = jest.fn(); +jest.mock("@aws-sdk/client-s3", () => { + const actual = jest.requireActual("@aws-sdk/client-s3"); + return { + ...actual, + S3Client: jest.fn().mockImplementation(() => ({ send: mockS3Send })), + }; +}); + +const mockSecretsManagerSend = jest.fn(); +jest.mock("@aws-sdk/client-secrets-manager", () => { + const actual = 
jest.requireActual("@aws-sdk/client-secrets-manager"); + return { + ...actual, + SecretsManagerClient: jest + .fn() + .mockImplementation(() => ({ send: mockSecretsManagerSend })), + }; +}); + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + }, +})); + +jest.mock("node-forge", () => ({ + pem: { + decode: jest.fn((input: string) => { + const matches = [ + ...(input ?? "").matchAll( + /-----BEGIN ([^-]+)-----[\s\S]*?-----END [^-]+-----/g, + ), + ]; + return matches.map((match) => ({ + type: (match[1] ?? "").trim(), + body: "", + })); + }), + encode: jest.fn( + (obj: { type: string }) => + `-----BEGIN ${obj.type}-----\nZmFrZQ==\n-----END ${obj.type}-----\n`, + ), + }, +})); + +const mockValidTo = new Date(Date.now() + 365 * 86_400_000).toISOString(); + +jest.mock("node:crypto", () => { + const actual = jest.requireActual("node:crypto"); + return { + ...actual, + X509Certificate: class MockX509Certificate { + validTo = mockValidTo; + + publicKey = { + export: () => Buffer.from("mock-spki-der"), + }; + }, + }; +}); + +const TEST_KEY = + "-----BEGIN PRIVATE KEY-----\nfake-key\n-----END PRIVATE KEY-----"; // gitleaks:allow +const TEST_CERT = + "-----BEGIN CERTIFICATE-----\nfake-cert\n-----END CERTIFICATE-----"; +const COMBINED_PEM = `${TEST_KEY}\n${TEST_CERT}`; + +const createTarget = ( + overrides: Partial = {}, +): CallbackTarget => ({ + targetId: "target-1", + type: "API", + invocationEndpoint: "https://webhook.example.invalid", + invocationMethod: "POST", + invocationRateLimit: 10, + apiKey: { headerName: "x-api-key", headerValue: "secret" }, + ...overrides, +}); + +const mockS3PemResponse = (pem: string) => { + mockS3Send.mockResolvedValue({ + Body: { transformToString: jest.fn().mockResolvedValue(pem) }, + }); +}; + +describe("tls-agent-factory", () => { + let buildAgent: typeof import("services/delivery/tls-agent-factory").buildAgent; + let resetCache: typeof 
import("services/delivery/tls-agent-factory").resetCache; + + beforeEach(async () => { + jest.resetModules(); + + delete process.env.MTLS_CERT_SECRET_ARN; + process.env.MTLS_TEST_CERT_S3_BUCKET = "test-certs-bucket"; + process.env.MTLS_TEST_CERT_S3_KEY = "client.pem"; + delete process.env.MTLS_TEST_CA_S3_KEY; + process.env.CERT_EXPIRY_THRESHOLD_MS = "86400000"; + + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + buildAgent = mod.buildAgent; + resetCache = mod.resetCache; + + mockS3Send.mockReset(); + mockSecretsManagerSend.mockReset(); + }); + + it("builds agent with key and cert when mtls is enabled", async () => { + mockS3PemResponse(COMBINED_PEM); + const agent = await buildAgent( + createTarget({ delivery: { mtls: { enabled: true } } }), + ); + + expect(agent).toBeDefined(); + expect(agent.options.keepAlive).toBe(false); + }); + + it("builds agent without key and cert when mtls is disabled", async () => { + const agent = await buildAgent(createTarget()); + + expect(agent).toBeDefined(); + expect(mockS3Send).not.toHaveBeenCalled(); + expect(mockSecretsManagerSend).not.toHaveBeenCalled(); + }); + + it("loads test CA for server trust when MTLS_TEST_CA_S3_KEY is set and mtls is disabled", async () => { + process.env.MTLS_TEST_CA_S3_KEY = "test-ca.pem"; + jest.resetModules(); + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + + const caPem = + "-----BEGIN CERTIFICATE-----\ntest-ca\n-----END CERTIFICATE-----"; + mockS3Send + .mockResolvedValueOnce({ + Body: { + transformToString: jest.fn().mockResolvedValue(COMBINED_PEM), + }, + }) + .mockResolvedValueOnce({ + Body: { transformToString: jest.fn().mockResolvedValue(caPem) }, + }); + + const agent = await mod.buildAgent( + createTarget({ delivery: { mtls: { enabled: false } } }), + ); + + expect(agent).toBeDefined(); + expect(agent.options.ca).toBe(caPem); + 
expect(agent.options.key).toBeUndefined(); + expect(agent.options.cert).toBeUndefined(); + }); + + it("loads test CA when MTLS_TEST_CA_S3_KEY is set", async () => { + process.env.MTLS_TEST_CA_S3_KEY = "test-ca.pem"; + jest.resetModules(); + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + + const caPem = + "-----BEGIN CERTIFICATE-----\ntest-ca\n-----END CERTIFICATE-----"; + mockS3Send + .mockResolvedValueOnce({ + Body: { + transformToString: jest.fn().mockResolvedValue(COMBINED_PEM), + }, + }) + .mockResolvedValueOnce({ + Body: { transformToString: jest.fn().mockResolvedValue(caPem) }, + }); + + const agent = await mod.buildAgent( + createTarget({ delivery: { mtls: { enabled: true } } }), + ); + + expect(agent).toBeDefined(); + expect(mockS3Send).toHaveBeenCalledTimes(2); + }); + + it("loads cert from S3 in non-production", async () => { + mockS3PemResponse(COMBINED_PEM); + await buildAgent(createTarget({ delivery: { mtls: { enabled: true } } })); + + expect(mockS3Send).toHaveBeenCalledTimes(1); + expect(mockSecretsManagerSend).not.toHaveBeenCalled(); + }); + + it("loads cert from SecretsManager in production", async () => { + process.env.MTLS_CERT_SECRET_ARN = + "arn:aws:secretsmanager:eu-west-2:123:secret:mtls-cert"; + jest.resetModules(); + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + + mockSecretsManagerSend.mockResolvedValue({ + SecretString: JSON.stringify({ key: TEST_KEY, cert: TEST_CERT }), + }); + + const agent = await mod.buildAgent( + createTarget({ delivery: { mtls: { enabled: true } } }), + ); + + expect(agent).toBeDefined(); + expect(mockSecretsManagerSend).toHaveBeenCalledTimes(1); + expect(mockS3Send).not.toHaveBeenCalled(); + }); + + it("caches cert material on subsequent calls", async () => { + mockS3PemResponse(COMBINED_PEM); + const target = createTarget({ delivery: { mtls: { enabled: true } } 
}); + + await buildAgent(target); + await buildAgent(target); + + expect(mockS3Send).toHaveBeenCalledTimes(1); + }); + + it("exports PERMANENT_TLS_ERROR_CODES set", async () => { + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + + expect(mod.PERMANENT_TLS_ERROR_CODES).toBeInstanceOf(Set); + expect(mod.PERMANENT_TLS_ERROR_CODES.has("CERT_HAS_EXPIRED")).toBe(true); + }); + + it("resets cached material via resetCache", async () => { + mockS3PemResponse(COMBINED_PEM); + const target = createTarget({ delivery: { mtls: { enabled: true } } }); + + await buildAgent(target); + resetCache(); + await buildAgent(target); + + expect(mockS3Send).toHaveBeenCalledTimes(2); + }); + + it("throws when SecretsManager returns empty SecretString", async () => { + process.env.MTLS_CERT_SECRET_ARN = + "arn:aws:secretsmanager:eu-west-2:123:secret:mtls-cert"; + jest.resetModules(); + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + + mockSecretsManagerSend.mockResolvedValue({ SecretString: undefined }); + + await expect( + mod.buildAgent(createTarget({ delivery: { mtls: { enabled: true } } })), + ).rejects.toThrow("mTLS cert secret has no value"); + }); + + it("throws when S3 env vars are missing in non-production", async () => { + delete process.env.MTLS_TEST_CERT_S3_BUCKET; + delete process.env.MTLS_TEST_CERT_S3_KEY; + jest.resetModules(); + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + + await expect( + mod.buildAgent(createTarget({ delivery: { mtls: { enabled: true } } })), + ).rejects.toThrow( + "MTLS_TEST_CERT_S3_BUCKET and MTLS_TEST_CERT_S3_KEY are required", + ); + }); + + it("throws when S3 object body is empty", async () => { + mockS3Send.mockResolvedValue({ Body: undefined }); + + await expect( + buildAgent(createTarget({ delivery: { mtls: { enabled: true 
} } })), + ).rejects.toThrow("has no body"); + }); + + it("builds agent with checkServerIdentity when certPinning is enabled", async () => { + mockS3PemResponse(COMBINED_PEM); + const target = createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: "abc123" }, + }, + }, + }); + + const agent = await buildAgent(target); + + expect(agent).toBeDefined(); + expect(agent.options.checkServerIdentity).toBeDefined(); + }); + + it("checkServerIdentity returns error when SPKI hash does not match", async () => { + mockS3PemResponse(COMBINED_PEM); + const target = createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: "expected-hash" }, + }, + }, + }); + + const agent = await buildAgent(target); + const checkFn = agent.options.checkServerIdentity as ( + hostname: string, + cert: { raw: Buffer; subject: { CN: string } }, + ) => Error | undefined; + + const mockPeerCert = { + raw: Buffer.from("mock-cert-der"), + subject: { CN: "webhook.example.invalid" }, + subjectaltname: "DNS:webhook.example.invalid", + }; + + const result = checkFn("webhook.example.invalid", mockPeerCert); + + expect(result).toBeInstanceOf(Error); + expect(result!.message).toContain("Certificate pinning failed"); + expect((result as NodeJS.ErrnoException).code).toBe( + "ERR_CERT_PINNING_FAILED", + ); + }); + + it("checkServerIdentity returns undefined when SPKI hash matches", async () => { + const { createHash } = jest.requireActual("node:crypto"); + const expectedHash = createHash("sha256") + .update(Buffer.from("mock-spki-der")) + .digest("base64"); + + mockS3PemResponse(COMBINED_PEM); + const target = createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: expectedHash }, + }, + }, + }); + + const agent = await buildAgent(target); + const checkFn = agent.options.checkServerIdentity as ( + hostname: string, + cert: { raw: Buffer; subject: { CN: string } }, + ) => Error | undefined; 
+ + const mockPeerCert = { + raw: Buffer.from("mock-cert-der"), + subject: { CN: "webhook.example.invalid" }, + subjectaltname: "DNS:webhook.example.invalid", + }; + + const result = checkFn("webhook.example.invalid", mockPeerCert); + + expect(result).toBeUndefined(); + }); + + it("checkServerIdentity returns default error when hostname does not match", async () => { + mockS3PemResponse(COMBINED_PEM); + const target = createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: "abc" }, + }, + }, + }); + + const agent = await buildAgent(target); + const checkFn = agent.options.checkServerIdentity as ( + hostname: string, + cert: { raw: Buffer; subject: { CN: string } }, + ) => Error | undefined; + + const mockPeerCert = { + raw: Buffer.from("mock-cert-der"), + subject: { CN: "other.example.invalid" }, + subjectaltname: "DNS:other.example.invalid", + }; + + const result = checkFn("webhook.example.invalid", mockPeerCert); + + expect(result).toBeDefined(); + expect(result!.message).toContain("does not match"); + }); + + it("does not load cert material when mtls is disabled", async () => { + const agent = await buildAgent(createTarget()); + + expect(agent).toBeDefined(); + expect(mockS3Send).not.toHaveBeenCalled(); + expect(mockSecretsManagerSend).not.toHaveBeenCalled(); + }); + + it("throws when certPinning.enabled is true but spkiHash is missing", async () => { + const target = createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true }, + }, + }, + }); + + await expect(buildAgent(target)).rejects.toThrow( + "certPinning.spkiHash is required when certPinning is enabled", + ); + expect(mockS3Send).not.toHaveBeenCalled(); + }); + + it("uses default CERT_EXPIRY_THRESHOLD_MS when env var is not set", async () => { + delete process.env.CERT_EXPIRY_THRESHOLD_MS; + jest.resetModules(); + // @ts-expect-error -- modulePaths resolves at runtime + const mod = await import("services/delivery/tls-agent-factory"); + 
+ mockS3PemResponse(COMBINED_PEM); + const agent = await mod.buildAgent( + createTarget({ delivery: { mtls: { enabled: true } } }), + ); + + expect(agent).toBeDefined(); + }); + + it("handles PEM with no private key or certificate sections", async () => { + mockS3Send.mockResolvedValue({ + Body: { + transformToString: jest.fn().mockResolvedValue("no-pem-content"), + }, + }); + + const agent = await buildAgent( + createTarget({ delivery: { mtls: { enabled: true } } }), + ); + + expect(agent).toBeDefined(); + }); +}); diff --git a/lambdas/https-client-lambda/src/handler.ts b/lambdas/https-client-lambda/src/handler.ts new file mode 100644 index 00000000..28fcc6b9 --- /dev/null +++ b/lambdas/https-client-lambda/src/handler.ts @@ -0,0 +1,284 @@ +import type { SQSBatchItemFailure, SQSRecord } from "aws-lambda"; +import type { ClientCallbackPayload } from "@nhs-notify-client-callbacks/models"; +import pMap from "p-map"; +import { logger } from "@nhs-notify-client-callbacks/logger"; +import { loadTargetConfig } from "services/config-loader"; +import { getApplicationId } from "services/ssm-applications-map"; +import { signPayload } from "services/payload-signer"; +import { buildAgent } from "services/delivery/tls-agent-factory"; +import { + OUTCOME_PERMANENT_FAILURE, + OUTCOME_RATE_LIMITED, + OUTCOME_SUCCESS, + deliverPayload, +} from "services/delivery/https-client"; +import type { DeliveryResult } from "services/delivery/https-client"; +import { sendToDlq } from "services/dlq-sender"; +import { changeVisibility } from "services/sqs-visibility"; +import { + handleRateLimitedRecord, + isWindowExhausted, + jitteredBackoffSeconds, +} from "services/delivery/retry-policy"; +import { + type EndpointGateConfig, + admit, + recordResult, +} from "services/endpoint-gate"; +import { getRedisClient } from "services/redis-client"; +import { VisibilityManagedError } from "services/visibility-managed-error"; +import { + recordAdmissionDenied, + recordCircuitBreakerClosed, + 
recordCircuitBreakerOpen, + recordDeliveryAttempt, + recordDeliveryDuration, + recordDeliveryFailure, + recordDeliveryPermanentFailure, + recordDeliveryRateLimited, + recordDeliverySuccess, + recordRetryWindowExhausted, +} from "services/delivery-observability"; +import { flushMetrics, resetMetrics } from "services/delivery-metrics"; + +type RedisClientType = Awaited>; + +const DEFAULT_MAX_RETRY_DURATION_MS = 7_200_000; // 2 hours +const DEFAULT_CONCURRENCY_LIMIT = 5; + +const gateConfig: EndpointGateConfig = { + burstCapacity: Number(process.env.TOKEN_BUCKET_BURST_CAPACITY ?? "10"), + cbProbeIntervalMs: Number(process.env.CB_PROBE_INTERVAL_MS ?? "60000"), + decayPeriodMs: Number(process.env.CB_DECAY_PERIOD_MS ?? "300000"), + cbWindowPeriodMs: Number(process.env.CB_WINDOW_PERIOD_MS ?? "60000"), + cbErrorThreshold: Number(process.env.CB_ERROR_THRESHOLD ?? "0.5"), + cbMinAttempts: Number(process.env.CB_MIN_ATTEMPTS ?? "10"), + cbCooldownMs: Number(process.env.CB_COOLDOWN_MS ?? "60000"), +}; + +type CallbackDeliveryMessage = { + payload: ClientCallbackPayload; + subscriptionId: string; + targetId: string; +}; + +async function checkAdmission( + redis: RedisClientType, + targetId: string, + invocationRateLimit: number, + cbEnabled: boolean, + clientId: string, + record: SQSRecord, + correlationId?: string, +): Promise { + const gateResult = await admit( + redis, + targetId, + invocationRateLimit, + cbEnabled, + gateConfig, + ); + + if (!gateResult.allowed) { + const delaySec = Math.ceil(gateResult.retryAfterMs / 1000); + recordAdmissionDenied(clientId, targetId, gateResult.reason, correlationId); + await changeVisibility(record.receiptHandle, delaySec); + throw new VisibilityManagedError(`Admission denied: ${gateResult.reason}`); + } +} + +const OUTCOME_DELIVERED = "delivered" as const; +const OUTCOME_DLQ = "dlq" as const; +type RecordOutcome = typeof OUTCOME_DELIVERED | typeof OUTCOME_DLQ; + +async function handleDeliveryResult( + result: DeliveryResult, + record: 
SQSRecord, + redis: RedisClientType, + clientId: string, + targetId: string, + cbEnabled: boolean, + correlationId?: string, +): Promise { + if (result.outcome === OUTCOME_SUCCESS) { + if (cbEnabled) { + const cbOutcome = await recordResult(redis, targetId, true, gateConfig); + if (cbOutcome.ok && cbOutcome.state === "closed") { + recordCircuitBreakerClosed(targetId, correlationId); + } + } + recordDeliverySuccess(clientId, targetId, correlationId); + return OUTCOME_DELIVERED; + } + + if (result.outcome === OUTCOME_PERMANENT_FAILURE) { + recordDeliveryPermanentFailure( + clientId, + targetId, + result.statusCode, + result.errorCode, + correlationId, + ); + await sendToDlq(record.body, result); + return OUTCOME_DLQ; + } + + if (result.outcome === OUTCOME_RATE_LIMITED) { + const receiveCount = Number(record.attributes.ApproximateReceiveCount); + recordDeliveryRateLimited(clientId, targetId, correlationId); + await handleRateLimitedRecord( + record, + clientId, + targetId, + result.retryAfterHeader, + receiveCount, + ); + return OUTCOME_DELIVERED; // unreachable — handleRateLimitedRecord always throws + } + + const receiveCount = Number(record.attributes.ApproximateReceiveCount); + const backoffSec = jitteredBackoffSeconds(receiveCount); + if (cbEnabled) { + const cbOutcome = await recordResult(redis, targetId, false, gateConfig); + if (cbOutcome.state === "opened") { + recordCircuitBreakerOpen(targetId, correlationId); + } + } + recordDeliveryFailure( + clientId, + targetId, + result.statusCode, + backoffSec, + receiveCount, + correlationId, + ); + await changeVisibility(record.receiptHandle, backoffSec); + throw new VisibilityManagedError(`Transient failure: ${result.statusCode}`); +} + +async function processRecord( + record: SQSRecord, + redis: RedisClientType, +): Promise { + const { CLIENT_ID } = process.env; + if (!CLIENT_ID) { + throw new Error("CLIENT_ID is required"); + } + + const message: CallbackDeliveryMessage = JSON.parse(record.body); + const { 
payload, targetId } = message; + const messageId = payload.data[0]?.attributes?.messageId; + + logger.info("Processing delivery", { + clientId: CLIENT_ID, + targetId, + messageId, + sqsMessageId: record.messageId, + receiveCount: record.attributes.ApproximateReceiveCount, + }); + + const target = await loadTargetConfig(CLIENT_ID, targetId); + const maxRetryDurationMs = + target.delivery?.maxRetryDurationSeconds === undefined + ? DEFAULT_MAX_RETRY_DURATION_MS + : target.delivery.maxRetryDurationSeconds * 1000; + + const firstReceivedMs = Number( + record.attributes.ApproximateFirstReceiveTimestamp, + ); + + if (isWindowExhausted(firstReceivedMs, maxRetryDurationMs)) { + recordRetryWindowExhausted(CLIENT_ID, targetId, messageId); + await sendToDlq(record.body); + return OUTCOME_DLQ; + } + + const applicationId = await getApplicationId(CLIENT_ID); + const cbEnabled = target.delivery?.circuitBreaker?.enabled ?? false; + + await checkAdmission( + redis, + targetId, + target.invocationRateLimit, + cbEnabled, + CLIENT_ID, + record, + messageId, + ); + + const agent = await buildAgent(target); + const signature = signPayload( + applicationId, + target.apiKey.headerValue, + payload, + ); + const payloadJson = JSON.stringify(payload); + + recordDeliveryAttempt(CLIENT_ID, targetId, messageId); + const deliveryStart = Date.now(); + const result = await deliverPayload(target, payloadJson, signature, agent); + recordDeliveryDuration(targetId, Date.now() - deliveryStart); + + return handleDeliveryResult( + result, + record, + redis, + CLIENT_ID, + targetId, + cbEnabled, + messageId, + ); +} + +export async function processRecords( + records: SQSRecord[], +): Promise { + resetMetrics(); + + logger.info("Batch received", { batchSize: records.length }); + + const concurrencyLimit = Number( + process.env.CONCURRENCY_LIMIT ?? 
String(DEFAULT_CONCURRENCY_LIMIT), + ); + + const redis = await getRedisClient(); + + const results = await pMap( + records, + async (record): Promise => { + try { + return await processRecord(record, redis); + } catch (error) { + if (!(error instanceof VisibilityManagedError)) { + logger.error("Failed to process record", { + messageId: record.messageId, + err: error, + }); + const receiveCount = Number( + record.attributes.ApproximateReceiveCount, + ); + await changeVisibility( + record.receiptHandle, + jitteredBackoffSeconds(receiveCount), + ); + } + return { itemIdentifier: record.messageId }; + } + }, + { concurrency: concurrencyLimit }, + ); + + await flushMetrics(); + const failures = results.filter( + (r): r is SQSBatchItemFailure => typeof r === "object", + ); + const deliveredCount = results.filter((r) => r === OUTCOME_DELIVERED).length; + const dlqCount = results.filter((r) => r === OUTCOME_DLQ).length; + logger.info("Batch complete", { + batchSize: records.length, + deliveredCount, + dlqCount, + failureCount: failures.length, + }); + return failures; +} diff --git a/lambdas/https-client-lambda/src/index.ts b/lambdas/https-client-lambda/src/index.ts new file mode 100644 index 00000000..d53608ff --- /dev/null +++ b/lambdas/https-client-lambda/src/index.ts @@ -0,0 +1,7 @@ +import type { SQSBatchResponse, SQSEvent } from "aws-lambda"; +import { processRecords } from "handler"; + +export async function handler(event: SQSEvent): Promise { + const batchItemFailures = await processRecords(event.Records); + return { batchItemFailures }; +} diff --git a/lambdas/https-client-lambda/src/lua.d.ts b/lambdas/https-client-lambda/src/lua.d.ts new file mode 100644 index 00000000..8fe49f84 --- /dev/null +++ b/lambdas/https-client-lambda/src/lua.d.ts @@ -0,0 +1,4 @@ +declare module "*.lua" { + const content: string; + export default content; +} diff --git a/lambdas/https-client-lambda/src/services/admit.lua b/lambdas/https-client-lambda/src/services/admit.lua new file mode 
100644 index 00000000..fd56decb --- /dev/null +++ b/lambdas/https-client-lambda/src/services/admit.lua @@ -0,0 +1,203 @@ +-- admit.lua — Decides whether a request to an endpoint is allowed. +-- +-- Three sequential checks run atomically: +-- 1. Circuit breaker — is the endpoint currently healthy? +-- 2. Sliding window — roll the two-window error-rate accounting state if needed +-- 3. Token bucket — is the endpoint within its rate limit? +-- +-- A request is allowed only when all three checks pass. +-- +-- While the circuit is open, a timed probe is let through at most once per +-- cbProbeIntervalMs so the caller can test whether the endpoint has recovered. +-- The probe bypasses the rate limit — counting it here would skew a +-- low-volume probe signal against the recovery decision. +-- +-- After the circuit closes, the token fill rate ramps up linearly from +-- near-zero to full over decayPeriodMs to avoid a thundering herd on recovery. +-- +-- Returns: { allowed (0|1), reason, retryAfterMs, effectiveRate } + +-- Keys +local cbKey = KEYS[1] -- cb:{endpoint} circuit breaker state hash +local rlKey = KEYS[2] -- rl:{endpoint} rate limiter state hash + +-- Arguments +local now = tonumber(ARGV[1]) or 0 -- current wall-clock time (ms) +local capacity = tonumber(ARGV[2]) or 0 -- token bucket maximum capacity +local refillPerSec = tonumber(ARGV[3]) or 0 -- full token fill rate (tokens/sec) +local cooldownMs = tonumber(ARGV[4]) or 0 -- how long the circuit stays open (ms) +local decayPeriodMs = tonumber(ARGV[5]) or 0 -- ramp-up window after circuit closes (ms) +local cbWindowPeriodMs = tonumber(ARGV[6]) or 0 -- error-rate sliding window duration (ms) +local cbProbeIntervalMs = tonumber(ARGV[7]) or 0 -- minimum gap between probe requests (ms; 0 = no probes) + +-- TTL policy: circuit breaker state must outlive the cooldown window so that +-- the ramp-up period remains visible to subsequent calls after a close. +-- Rate limiter state needs only a short idle window. 
+local cbTtlSeconds = math.ceil(cooldownMs / 1000) + 60 +local rlTtlSeconds = 120 + +-------------------------------------------------------------------------------- +-- LOAD STATE +-------------------------------------------------------------------------------- + +local cb = redis.call("HMGET", cbKey, + "opened_until_ms", "cb_window_from", "cb_failures", "cb_attempts", "last_probe_ms", + "cb_prev_failures", "cb_prev_attempts") +local openedUntil = tonumber(cb[1] or "0") +local cbWindowFrom = tonumber(cb[2] or "0") +local cbFailures = tonumber(cb[3] or "0") +local cbAttempts = tonumber(cb[4] or "0") +local lastProbeMs = tonumber(cb[5] or "0") +local cbPrevFailures = tonumber(cb[6] or "0") +local cbPrevAttempts = tonumber(cb[7] or "0") + +local rl = redis.call("HMGET", rlKey, "tokens", "last_refill_ms") +local tokens = tonumber(rl[1] or capacity) +local lastRefill = tonumber(rl[2] or now) + +-------------------------------------------------------------------------------- +-- 1. CIRCUIT BREAKER +-- +-- The circuit is open when openedUntil is set and has not yet elapsed. +-- All requests are rejected while open to give the endpoint time to recover. +-- +-- Timed probes: once per cbProbeIntervalMs a single request is allowed +-- through even while the circuit is open. The caller must record the +-- outcome via record-result.lua; a successful probe will close the circuit +-- and trigger the ramp-up phase. 
+-------------------------------------------------------------------------------- + +if openedUntil > 0 and now < openedUntil then + -- Allow a probe through if the probe interval has elapsed + if cbProbeIntervalMs > 0 and (now - lastProbeMs) >= cbProbeIntervalMs then + lastProbeMs = now + redis.call("HSET", cbKey, + "opened_until_ms", openedUntil, + "cb_window_from", cbWindowFrom, + "cb_failures", cbFailures, + "cb_attempts", cbAttempts, + "last_probe_ms", lastProbeMs, + "cb_prev_failures", cbPrevFailures, + "cb_prev_attempts", cbPrevAttempts + ) + redis.call("EXPIRE", cbKey, cbTtlSeconds) + return { 1, "probe", 0, 0 } + end + + -- Circuit is open and no probe slot is available — reject + return { 0, "circuit_open", openedUntil - now, 0 } +end + +-------------------------------------------------------------------------------- +-- 2. SLIDING WINDOW +-- +-- Two windows (current + previous) together approximate a sliding window over +-- cbWindowPeriodMs. When the current window expires it is promoted to previous +-- and a fresh current window starts. record-result.lua blends the two windows +-- using a time-based weight to smooth the error rate across the boundary rather +-- than resetting it to zero at expiry. +-- +-- record-result.lua is responsible for incrementing the counters; this script +-- is only responsible for rolling the window boundary forward when it expires. +-------------------------------------------------------------------------------- + +if cbWindowFrom == 0 then + -- No window exists yet — start one now + cbWindowFrom = now +elseif (now - cbWindowFrom) > cbWindowPeriodMs then + -- Current window has expired — roll it forward + if (now - cbWindowFrom) > (2 * cbWindowPeriodMs) then + -- Both current and previous windows are stale: a long quiet period means + -- old failure counts are no longer relevant to the health of the endpoint. 
+ cbPrevFailures = 0 + cbPrevAttempts = 0 + else + -- Promote current → previous so it can be blended with the new current window + cbPrevFailures = cbFailures + cbPrevAttempts = cbAttempts + end + cbFailures = 0 + cbAttempts = 0 + cbWindowFrom = now +end + +-------------------------------------------------------------------------------- +-- 3. TOKEN BUCKET +-- +-- Refills tokens based on elapsed time, then tries to consume one. +-- If no tokens are available the request is rate-limited. +-- +-- Ramp-up: after the circuit closes (openedUntil is set but in the past), +-- effectiveRate scales linearly from near-zero to the full refillPerSec over +-- decayPeriodMs. This deliberately slows recovery traffic so a flapping +-- endpoint is not immediately overwhelmed. +-- Once decayPeriodMs elapses, openedUntil is cleared and the full rate resumes. +-------------------------------------------------------------------------------- + +local effectiveRate = refillPerSec + +if openedUntil > 0 and now > openedUntil and decayPeriodMs > 0 then + -- Circuit has recently closed — apply linear ramp-up + local sinceClose = now - openedUntil + if sinceClose >= decayPeriodMs then + -- Decay period fully elapsed — restore full rate and clear the CB timestamp + openedUntil = 0 + else + -- Still within decay period — scale fill rate proportionally to time elapsed + local fraction = sinceClose / decayPeriodMs + effectiveRate = math.max(1, math.floor(refillPerSec * fraction)) + end +end + +-- Refill tokens based on time elapsed since last refill +local elapsed = now - lastRefill +if elapsed > 0 then + local refill = math.floor((elapsed * effectiveRate) / 1000) + if refill > 0 then + tokens = math.min(capacity, tokens + refill) + lastRefill = now + end +end + +-- Not enough tokens — rate-limited +-- TTL is intentionally not refreshed here; it was set on the last allowed call. 
+if tokens < 1 then + redis.call("HSET", cbKey, + "opened_until_ms", openedUntil, + "cb_window_from", cbWindowFrom, + "cb_failures", cbFailures, + "cb_attempts", cbAttempts, + "cb_prev_failures", cbPrevFailures, + "cb_prev_attempts", cbPrevAttempts + ) + redis.call("HSET", rlKey, + "tokens", tokens, + "last_refill_ms", lastRefill + ) + return { 0, "rate_limited", 1000, effectiveRate } +end + +-- Consume one token +tokens = tokens - 1 + +-------------------------------------------------------------------------------- +-- 4. PERSIST STATE AND ALLOW +-------------------------------------------------------------------------------- + +redis.call("HSET", cbKey, + "opened_until_ms", openedUntil, + "cb_window_from", cbWindowFrom, + "cb_failures", cbFailures, + "cb_attempts", cbAttempts, + "cb_prev_failures", cbPrevFailures, + "cb_prev_attempts", cbPrevAttempts +) +redis.call("HSET", rlKey, + "tokens", tokens, + "last_refill_ms", lastRefill +) + +redis.call("EXPIRE", cbKey, cbTtlSeconds) +redis.call("EXPIRE", rlKey, rlTtlSeconds) + +return { 1, "allowed", 0, effectiveRate } diff --git a/lambdas/https-client-lambda/src/services/config-loader.ts b/lambdas/https-client-lambda/src/services/config-loader.ts new file mode 100644 index 00000000..7f5b7bdc --- /dev/null +++ b/lambdas/https-client-lambda/src/services/config-loader.ts @@ -0,0 +1,54 @@ +import { S3Client } from "@aws-sdk/client-s3"; +import type { CallbackTarget } from "@nhs-notify-client-callbacks/models"; +import { ConfigSubscriptionCache } from "@nhs-notify-client-callbacks/config-subscription-cache"; + +const s3Client = new S3Client({}); +let cache: ConfigSubscriptionCache | undefined; + +function getCache(): ConfigSubscriptionCache { + if (!cache) { + const { + CLIENT_SUBSCRIPTION_CONFIG_BUCKET, + CLIENT_SUBSCRIPTION_CONFIG_PREFIX, + } = process.env; + if (!CLIENT_SUBSCRIPTION_CONFIG_BUCKET) { + throw new Error("CLIENT_SUBSCRIPTION_CONFIG_BUCKET is required"); + } + + const ttlMs = + 
(Number(process.env.CLIENT_SUBSCRIPTION_CACHE_TTL_SECONDS) || 300) * 1000; + + cache = new ConfigSubscriptionCache({ + s3Client, + bucketName: CLIENT_SUBSCRIPTION_CONFIG_BUCKET, + keyPrefix: CLIENT_SUBSCRIPTION_CONFIG_PREFIX ?? "client_subscriptions/", + ttlMs, + }); + } + return cache; +} + +export function resetCache(): void { + cache = undefined; +} + +export async function loadTargetConfig( + clientId: string, + targetId: string, +): Promise { + const clientConfig = await getCache().loadClientConfig(clientId); + + if (!clientConfig) { + throw new Error(`No configuration found for client '${clientId}'`); + } + + const target = clientConfig.targets.find((t) => t.targetId === targetId); + + if (!target) { + throw new Error( + `Target '${targetId}' not found in config for client '${clientId}'`, + ); + } + + return target; +} diff --git a/lambdas/https-client-lambda/src/services/delivery-metrics.ts b/lambdas/https-client-lambda/src/services/delivery-metrics.ts new file mode 100644 index 00000000..68248591 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/delivery-metrics.ts @@ -0,0 +1,100 @@ +import { Unit, createMetricsLogger } from "aws-embedded-metrics"; +import type { MetricsLogger } from "aws-embedded-metrics"; + +let metricsInstance: MetricsLogger | undefined; + +function getMetrics(): MetricsLogger { + if (metricsInstance) { + return metricsInstance; + } + + const namespace = process.env.METRICS_NAMESPACE; + const environment = process.env.ENVIRONMENT; + + if (!namespace) { + throw new Error("METRICS_NAMESPACE environment variable is not set"); + } + if (!environment) { + throw new Error("ENVIRONMENT environment variable is not set"); + } + + metricsInstance = createMetricsLogger(); + metricsInstance.setNamespace(namespace); + metricsInstance.setDimensions({ Environment: environment }); + + return metricsInstance; +} + +export function emitDeliveryAttempt(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", 
targetId); + metrics.putMetric("DeliveryAttempt", 1, Unit.Count); +} + +export function emitDeliverySuccess(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("DeliverySuccess", 1, Unit.Count); +} + +export function emitDeliveryFailure(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("DeliveryFailure", 1, Unit.Count); +} + +export function emitDeliveryPermanentFailure(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("DeliveryPermanentFailure", 1, Unit.Count); +} + +export function emitRateLimited(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("DeliveryRateLimited", 1, Unit.Count); +} + +export function emitCircuitBreakerOpen(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("CircuitBreakerOpen", 1, Unit.Count); +} + +export function emitCircuitBreakerClosed(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("CircuitBreakerClosed", 1, Unit.Count); +} + +export function emitRetryWindowExhausted(targetId: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("DeliveryRetryWindowExhausted", 1, Unit.Count); +} + +export function emitAdmissionDenied(targetId: string, reason: string): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.setProperty("reason", reason); + metrics.putMetric("AdmissionDenied", 1, Unit.Count); +} + +export function emitDeliveryDuration( + targetId: string, + durationMs: number, +): void { + const metrics = getMetrics(); + metrics.setProperty("targetId", targetId); + metrics.putMetric("DeliveryDurationMs", 
durationMs, Unit.Milliseconds); +} + +export async function flushMetrics(): Promise { + if (metricsInstance) { + await metricsInstance.flush(); + } +} + +export function resetMetrics(): void { + metricsInstance = undefined; +} diff --git a/lambdas/https-client-lambda/src/services/delivery-observability.ts b/lambdas/https-client-lambda/src/services/delivery-observability.ts new file mode 100644 index 00000000..ed41df8a --- /dev/null +++ b/lambdas/https-client-lambda/src/services/delivery-observability.ts @@ -0,0 +1,127 @@ +import { logger } from "@nhs-notify-client-callbacks/logger"; +import { + emitAdmissionDenied, + emitCircuitBreakerClosed, + emitCircuitBreakerOpen, + emitDeliveryAttempt, + emitDeliveryDuration, + emitDeliveryFailure, + emitDeliveryPermanentFailure, + emitDeliverySuccess, + emitRateLimited, + emitRetryWindowExhausted, +} from "services/delivery-metrics"; + +export function recordDeliveryAttempt( + clientId: string, + targetId: string, + correlationId?: string, +): void { + emitDeliveryAttempt(targetId); + logger.info("Attempting delivery", { clientId, targetId, correlationId }); +} + +export function recordDeliverySuccess( + clientId: string, + targetId: string, + correlationId?: string, +): void { + emitDeliverySuccess(targetId); + logger.info("Delivery succeeded", { clientId, targetId, correlationId }); +} + +export function recordDeliveryPermanentFailure( + clientId: string, + targetId: string, + statusCode?: number, + errorCode?: string, + correlationId?: string, +): void { + emitDeliveryPermanentFailure(targetId); + logger.warn("Permanent delivery failure — sending to DLQ", { + clientId, + targetId, + correlationId, + ...(statusCode !== undefined && { statusCode }), + ...(errorCode !== undefined && { errorCode }), + }); +} + +export function recordDeliveryRateLimited( + clientId: string, + targetId: string, + correlationId?: string, +): void { + emitRateLimited(targetId); + logger.info("Rate limited (429)", { clientId, targetId, 
correlationId }); +} + +export function recordDeliveryFailure( + clientId: string, + targetId: string, + statusCode: number, + backoffSec: number, + receiveCount: number, + correlationId?: string, +): void { + emitDeliveryFailure(targetId); + logger.warn("Transient delivery failure — requeuing", { + clientId, + targetId, + correlationId, + statusCode, + backoffSec, + receiveCount, + }); +} + +export function recordCircuitBreakerOpen( + targetId: string, + correlationId?: string, +): void { + emitCircuitBreakerOpen(targetId); + logger.warn("Circuit breaker opened", { targetId, correlationId }); +} + +export function recordCircuitBreakerClosed( + targetId: string, + correlationId?: string, +): void { + emitCircuitBreakerClosed(targetId); + logger.info("Circuit breaker closed", { targetId, correlationId }); +} + +export function recordRetryWindowExhausted( + clientId: string, + targetId: string, + correlationId?: string, +): void { + emitRetryWindowExhausted(targetId); + logger.warn("Retry window exhausted — sending to DLQ", { + clientId, + targetId, + correlationId, + }); +} + +export function recordAdmissionDenied( + clientId: string, + targetId: string, + reason: string, + correlationId?: string, +): void { + emitAdmissionDenied(targetId, reason); + logger.warn("Admission denied", { + clientId, + targetId, + correlationId, + reason, + }); +} + +export function recordDeliveryDuration( + targetId: string, + durationMs: number, +): void { + emitDeliveryDuration(targetId, durationMs); +} diff --git a/lambdas/https-client-lambda/src/services/delivery/https-client.ts b/lambdas/https-client-lambda/src/services/delivery/https-client.ts new file mode 100644 index 00000000..dfe142f8 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/delivery/https-client.ts @@ -0,0 +1,103 @@ +import https from "node:https"; +import type { Agent } from "node:https"; +import type { CallbackTarget } from "@nhs-notify-client-callbacks/models"; +import { PERMANENT_TLS_ERROR_CODES } from 
"services/delivery/tls-agent-factory"; + +export const OUTCOME_SUCCESS = "success" as const; +export const OUTCOME_PERMANENT_FAILURE = "permanent_failure" as const; +export const OUTCOME_RATE_LIMITED = "rate_limited" as const; +export const OUTCOME_TRANSIENT_FAILURE = "transient_failure" as const; + +export type DeliveryResult = + | { outcome: typeof OUTCOME_SUCCESS } + | { + outcome: typeof OUTCOME_PERMANENT_FAILURE; + statusCode?: number; + errorCode?: string; + responseBody?: string; + } + | { + outcome: typeof OUTCOME_RATE_LIMITED; + statusCode: 429; + retryAfterHeader: string | undefined; + } + | { outcome: typeof OUTCOME_TRANSIENT_FAILURE; statusCode: number }; + +export function deliverPayload( + target: CallbackTarget, + signedPayloadJson: string, + signatureHeader: string, + agent: Agent, +): Promise { + const requestTimeoutMs = Number(process.env.REQUEST_TIMEOUT_MS ?? "30000"); + + return new Promise((resolve) => { + const url = new URL(target.invocationEndpoint); + + const req = https.request( + url, + { + method: target.invocationMethod, + agent, + timeout: requestTimeoutMs, + headers: { + "Content-Type": "application/json", + "x-hmac-sha256-signature": signatureHeader, + [target.apiKey.headerName]: target.apiKey.headerValue, + }, + }, + (res) => { + const statusCode = res.statusCode ?? 
0; + + if (statusCode >= 200 && statusCode < 300) { + res.resume(); + resolve({ outcome: OUTCOME_SUCCESS }); + return; + } + + if (statusCode === 429) { + res.resume(); + const retryAfterHeader = res.headers["retry-after"]; + resolve({ + outcome: OUTCOME_RATE_LIMITED, + statusCode, + retryAfterHeader, + }); + return; + } + + if (statusCode >= 400 && statusCode < 500) { + const chunks: Buffer[] = []; + res.on("data", (chunk: Buffer) => chunks.push(chunk)); + res.on("end", () => { + const responseBody = Buffer.concat(chunks).toString("utf8"); + resolve({ + outcome: OUTCOME_PERMANENT_FAILURE, + statusCode, + responseBody, + }); + }); + return; + } + + res.resume(); + resolve({ outcome: OUTCOME_TRANSIENT_FAILURE, statusCode }); + }, + ); + + req.on("timeout", () => { + req.destroy(new Error("Request timed out")); + }); + + req.on("error", (error: NodeJS.ErrnoException) => { + if (error.code && PERMANENT_TLS_ERROR_CODES.has(error.code)) { + resolve({ outcome: OUTCOME_PERMANENT_FAILURE, errorCode: error.code }); + return; + } + + resolve({ outcome: OUTCOME_TRANSIENT_FAILURE, statusCode: 0 }); + }); + + req.end(signedPayloadJson); + }); +} diff --git a/lambdas/https-client-lambda/src/services/delivery/retry-policy.ts b/lambdas/https-client-lambda/src/services/delivery/retry-policy.ts new file mode 100644 index 00000000..2bde6516 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/delivery/retry-policy.ts @@ -0,0 +1,80 @@ +import type { SQSRecord } from "aws-lambda"; +import { logger } from "@nhs-notify-client-callbacks/logger"; +import { sendToDlq } from "services/dlq-sender"; +import { changeVisibility } from "services/sqs-visibility"; +import { VisibilityManagedError } from "services/visibility-managed-error"; + +const BACKOFF_CAP_SECONDS = 300; +const SQS_MAX_VISIBILITY_SECONDS = 43_200; +const BASE_BACKOFF_MULTIPLIER = 5; +const BACKOFF_EXPONENT_BASE = 2; + +export function jitteredBackoffSeconds(receiveCount: number): number { + const ceiling = Math.min( + 
BASE_BACKOFF_MULTIPLIER * BACKOFF_EXPONENT_BASE ** (receiveCount - 1), + BACKOFF_CAP_SECONDS, + ); + // eslint-disable-next-line sonarjs/pseudo-random -- jitter for backoff, not security-sensitive + return Math.max(1, Math.floor(Math.random() * ceiling)); +} + +export function parseRetryAfter(header: string): number { + const asInt = Number(header); + + if (!Number.isNaN(asInt) && Number.isFinite(asInt)) { + return Math.max(0, Math.floor(asInt)); + } + + const date = new Date(header); + if (Number.isNaN(date.getTime())) { + return 0; + } + + return Math.max(0, Math.floor((date.getTime() - Date.now()) / 1000)); +} + +export function isWindowExhausted( + firstReceivedMs: number, + maxRetryDurationMs: number, +): boolean { + return Date.now() - firstReceivedMs >= maxRetryDurationMs; +} + +export function exceedsSqsMaxVisibility(retryAfterSeconds: number): boolean { + return retryAfterSeconds > SQS_MAX_VISIBILITY_SECONDS; +} + +export async function handleRateLimitedRecord( + record: SQSRecord, + clientId: string, + targetId: string, + retryAfterHeader: string | undefined, + receiveCount: number, +): Promise { + const retryAfterSeconds = retryAfterHeader + ? parseRetryAfter(retryAfterHeader) + : 0; + + if (exceedsSqsMaxVisibility(retryAfterSeconds)) { + logger.warn("429 Retry-After exceeds SQS max — sending to DLQ", { + clientId, + targetId, + retryAfterSeconds, + }); + await sendToDlq(record.body); + return; + } + + const delaySec = + retryAfterSeconds > 0 + ? 
retryAfterSeconds + : jitteredBackoffSeconds(receiveCount); + + logger.warn("Rate limited (429) — requeuing", { + clientId, + targetId, + delaySec, + }); + await changeVisibility(record.receiptHandle, delaySec); + throw new VisibilityManagedError("Rate limited — requeue"); +} diff --git a/lambdas/https-client-lambda/src/services/delivery/tls-agent-factory.ts b/lambdas/https-client-lambda/src/services/delivery/tls-agent-factory.ts new file mode 100644 index 00000000..fb1ea136 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/delivery/tls-agent-factory.ts @@ -0,0 +1,208 @@ +import { Agent } from "node:https"; +import { X509Certificate, createHash } from "node:crypto"; +import { checkServerIdentity } from "node:tls"; +import type { PeerCertificate } from "node:tls"; +import forge from "node-forge"; +import { GetObjectCommand, S3Client } from "@aws-sdk/client-s3"; +import { + GetSecretValueCommand, + SecretsManagerClient, +} from "@aws-sdk/client-secrets-manager"; +import type { CallbackTarget } from "@nhs-notify-client-callbacks/models"; +import { logger } from "@nhs-notify-client-callbacks/logger"; + +const { + MTLS_CERT_SECRET_ARN, + MTLS_TEST_CA_S3_KEY, + MTLS_TEST_CERT_S3_BUCKET, + MTLS_TEST_CERT_S3_KEY, +} = process.env; +const CERT_EXPIRY_THRESHOLD_MS = + Number(process.env.CERT_EXPIRY_THRESHOLD_MS) || 86_400_000; // 24 hours + +const s3Client = new S3Client({}); +const secretsClient = new SecretsManagerClient({}); + +export const PERMANENT_TLS_ERROR_CODES = new Set([ + "CERT_HAS_EXPIRED", + "DEPTH_ZERO_SELF_SIGNED_CERT", + "ERR_CERT_PINNING_FAILED", + "ERR_TLS_CERT_ALTNAME_INVALID", + "SELF_SIGNED_CERT_IN_CHAIN", + "UNABLE_TO_VERIFY_LEAF_SIGNATURE", +]); + +type CertMaterial = { + key: string; + cert: string; + ca?: string; + validTo: Date; +}; + +let cachedMaterial: CertMaterial | undefined; + +async function loadFromSecretsManager(): Promise<{ + key: string; + cert: string; +}> { + const response = await secretsClient.send( + new 
GetSecretValueCommand({ SecretId: MTLS_CERT_SECRET_ARN }), + ); + + if (!response.SecretString) { + throw new Error("mTLS cert secret has no value"); + } + + const parsed = JSON.parse(response.SecretString) as { + key: string; + cert: string; + }; + return { key: parsed.key, cert: parsed.cert }; +} + +async function loadS3Object(bucket: string, key: string): Promise { + const response = await s3Client.send( + new GetObjectCommand({ Bucket: bucket, Key: key }), + ); + + if (!response.Body) { + throw new Error(`S3 object s3://${bucket}/${key} has no body`); + } + + return response.Body.transformToString(); +} + +async function loadFromS3(): Promise<{ + key: string; + cert: string; + ca?: string; +}> { + if (!MTLS_TEST_CERT_S3_BUCKET || !MTLS_TEST_CERT_S3_KEY) { + throw new Error( + "MTLS_TEST_CERT_S3_BUCKET and MTLS_TEST_CERT_S3_KEY are required in non-production", + ); + } + + const pem = await loadS3Object( + MTLS_TEST_CERT_S3_BUCKET, + MTLS_TEST_CERT_S3_KEY, + ); + + const pemObjects = forge.pem.decode(pem); + const keyObj = pemObjects.find((obj) => obj.type.includes("PRIVATE KEY")); + const certObj = pemObjects.find((obj) => obj.type.includes("CERTIFICATE")); + const key = keyObj ? forge.pem.encode(keyObj) : ""; + const cert = certObj ? forge.pem.encode(certObj) : ""; + + let ca: string | undefined; + if (MTLS_TEST_CA_S3_KEY) { + ca = await loadS3Object(MTLS_TEST_CERT_S3_BUCKET, MTLS_TEST_CA_S3_KEY); + } + + return { key, cert, ca }; +} + +async function loadCertMaterial(): Promise { + const isProduction = Boolean(MTLS_CERT_SECRET_ARN); + const raw = isProduction + ? await loadFromSecretsManager() + : await loadFromS3(); + + const x509 = new X509Certificate(raw.cert); + const validTo = new Date(x509.validTo); + + logger.info("mTLS certificate loaded", { + source: isProduction ? "SecretsManager" : "S3", + validTo: validTo.toISOString(), + }); + + return { + key: raw.key, + cert: raw.cert, + ca: "ca" in raw ? 
(raw.ca as string | undefined) : undefined, + validTo, + }; +} + +function isExpiringSoon(material: CertMaterial): boolean { + return material.validTo.getTime() - Date.now() < CERT_EXPIRY_THRESHOLD_MS; +} + +async function getMaterial(): Promise { + if (cachedMaterial && !isExpiringSoon(cachedMaterial)) { + return cachedMaterial; + } + + cachedMaterial = await loadCertMaterial(); + return cachedMaterial; +} + +export async function buildAgent(target: CallbackTarget): Promise { + const agentOptions: Record = { + keepAlive: false, + }; + + const certPinning = target.delivery?.mtls?.certPinning; + + if (certPinning?.enabled && !certPinning.spkiHash) { + throw new Error( + `certPinning.spkiHash is required when certPinning is enabled for target '${target.targetId}'`, + ); + } + + // Always load the CA in test environments (MTLS_TEST_CA_S3_KEY set) so that + // targets with mtls.enabled: false can still verify the server's cert chain. + // In production the CA comes from SecretsManager only when mTLS is in use. 
+ if (target.delivery?.mtls?.enabled || MTLS_TEST_CA_S3_KEY) { + const material = await getMaterial(); + + if (material.ca) { + agentOptions.ca = material.ca; + } + + if (target.delivery?.mtls?.enabled) { + agentOptions.key = material.key; + agentOptions.cert = material.cert; + } + } + + if (certPinning?.enabled) { + const expectedHash = certPinning.spkiHash!; + + /* eslint-disable sonarjs/function-return-type -- checkServerIdentity requires Error|undefined return */ + agentOptions.checkServerIdentity = ( + hostname: string, + peerCert: PeerCertificate, + ) => { + const defaultResult = checkServerIdentity(hostname, peerCert); + if (defaultResult) { + return defaultResult; + } + + const rawDer = peerCert.raw; + const x509 = new X509Certificate(rawDer); + const spkiDer = x509.publicKey.export({ + type: "spki", + format: "der", + }) as Buffer; + const actualHash = createHash("sha256").update(spkiDer).digest("base64"); + + if (actualHash !== expectedHash) { + const error = new Error( + `Certificate pinning failed: expected SPKI hash '${expectedHash}', got '${actualHash}'`, + ); + (error as NodeJS.ErrnoException).code = "ERR_CERT_PINNING_FAILED"; + return error; + } + + return undefined; + }; + /* eslint-enable sonarjs/function-return-type */ + } + + return new Agent(agentOptions as ConstructorParameters[0]); +} + +export function resetCache(): void { + cachedMaterial = undefined; +} diff --git a/lambdas/https-client-lambda/src/services/dlq-sender.ts b/lambdas/https-client-lambda/src/services/dlq-sender.ts new file mode 100644 index 00000000..56b92405 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/dlq-sender.ts @@ -0,0 +1,70 @@ +import { + type MessageAttributeValue, + SQSClient, + SendMessageCommand, +} from "@aws-sdk/client-sqs"; + +const sqsClient = new SQSClient({}); + +export type DlqErrorInfo = { + statusCode?: number; + errorCode?: string; + responseBody?: string; +}; + +function buildDlqAttributes( + errorInfo: DlqErrorInfo, +): Record { + const 
attrs: Record = {}; + + if (errorInfo.errorCode) { + attrs.ERROR_CODE = { + DataType: "String", + StringValue: errorInfo.errorCode, + }; + } else if (errorInfo.statusCode !== undefined) { + attrs.ERROR_CODE = { + DataType: "String", + StringValue: "HTTP_CLIENT_ERROR", + }; + } + + if (errorInfo.responseBody) { + let errorMessage = errorInfo.responseBody; + try { + const parsed = JSON.parse(errorInfo.responseBody) as { + message?: string; + }; + if (parsed.message) { + errorMessage = parsed.message; + } + } catch { + // use raw body if not valid JSON + } + attrs.ERROR_MESSAGE = { DataType: "String", StringValue: errorMessage }; + } + + return attrs; +} + +export async function sendToDlq( + messageBody: string, + errorInfo?: DlqErrorInfo, +): Promise { + const { DLQ_URL } = process.env; + if (!DLQ_URL) { + throw new Error("DLQ_URL is required"); + } + + const messageAttributes = errorInfo + ? buildDlqAttributes(errorInfo) + : undefined; + + await sqsClient.send( + new SendMessageCommand({ + QueueUrl: DLQ_URL, + MessageBody: messageBody, + ...(messageAttributes && { MessageAttributes: messageAttributes }), + }), + ); +} diff --git a/lambdas/https-client-lambda/src/services/endpoint-gate.ts b/lambdas/https-client-lambda/src/services/endpoint-gate.ts new file mode 100644 index 00000000..c2d85439 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/endpoint-gate.ts @@ -0,0 +1,170 @@ +import type { RedisClientType } from "services/redis-client"; +import { createHash } from "node:crypto"; +import admitLuaSrc from "services/admit.lua"; +import recordResultLuaSrc from "services/record-result.lua"; + +export type AdmitResultAllowed = { + allowed: true; + probe: boolean; + effectiveRate: number; +}; + +export type AdmitResultDenied = { + allowed: false; + reason: "circuit_open" | "rate_limited"; + retryAfterMs: number; + effectiveRate: number; +}; + +export type AdmitResult = AdmitResultAllowed | AdmitResultDenied; + +export type RecordResultOutcome = + | { ok: true; 
state: "closed" } + | { ok: false; state: "opened" | "failed" }; + +export type EndpointGateConfig = { + burstCapacity: number; + cbProbeIntervalMs: number; + decayPeriodMs: number; + cbWindowPeriodMs: number; + cbErrorThreshold: number; + cbMinAttempts: number; + cbCooldownMs: number; +}; + +let admitSha: string | undefined; +let recordResultSha: string | undefined; + +function computeSha1(script: string): string { + // eslint-disable-next-line sonarjs/hashing -- SHA-1 required by Redis EVALSHA protocol, not a security context + return createHash("sha1").update(script).digest("hex"); +} + +async function evalScript( + client: RedisClientType, + script: string, + sha: string, + keys: string[], + args: string[], +): Promise { + const keyCount = keys.length.toString(); + try { + return await client.sendCommand([ + "EVALSHA", + sha, + keyCount, + ...keys, + ...args, + ]); + } catch (error: unknown) { + const isNoScript = + error instanceof Error && error.message.includes("NOSCRIPT"); + if (!isNoScript) { + throw new Error( + `Redis error in script ${script}: ${ + error instanceof Error ? error.message : String(error) + }`, + { cause: error }, + ); + } + return client.sendCommand(["EVAL", script, keyCount, ...keys, ...args]); + } +} + +export async function admit( + client: RedisClientType, + targetId: string, + refillPerSec: number, + cbEnabled: boolean, + config: EndpointGateConfig, +): Promise { + const cbKey = `cb:{${targetId}}`; + const rlKey = `rl:{${targetId}}`; + const now = Date.now().toString(); + const probeIntervalMs = cbEnabled ? 
config.cbProbeIntervalMs.toString() : "0"; + + const args = [ + now, + config.burstCapacity.toString(), + // eslint-disable-next-line sonarjs/null-dereference + refillPerSec.toString(), + config.cbCooldownMs.toString(), + config.decayPeriodMs.toString(), + config.cbWindowPeriodMs.toString(), + probeIntervalMs, + ]; + + if (!admitSha) { + admitSha = computeSha1(admitLuaSrc); + } + + const raw = (await evalScript( + client, + admitLuaSrc, + admitSha, + [cbKey, rlKey], + args, + )) as [number, string, number, number]; + + const [allowed, reason, retryAfterMs, effectiveRate] = raw; + + if (allowed === 1) { + return { + allowed: true, + probe: reason === "probe", + effectiveRate: Number(effectiveRate), + }; + } + + return { + allowed: false, + reason: reason as "circuit_open" | "rate_limited", + retryAfterMs: Number(retryAfterMs), + effectiveRate: Number(effectiveRate), + }; +} + +export async function recordResult( + client: RedisClientType, + targetId: string, + success: boolean, + config: EndpointGateConfig, +): Promise { + const cbKey = `cb:{${targetId}}`; + const now = Date.now().toString(); + + const args = [ + now, + success ? 
"1" : "0", + config.cbCooldownMs.toString(), + config.decayPeriodMs.toString(), + config.cbErrorThreshold.toString(), + config.cbMinAttempts.toString(), + config.cbWindowPeriodMs.toString(), + ]; + + if (!recordResultSha) { + recordResultSha = computeSha1(recordResultLuaSrc); + } + + const raw = (await evalScript( + client, + recordResultLuaSrc, + recordResultSha, + [cbKey], + args, + )) as [number, string]; + + const [ok, state] = raw; + + if (ok === 1) { + return { ok: true, state: "closed" }; + } + + return { ok: false, state: state as "opened" | "failed" }; +} + +export function resetAdmitSha(): void { + admitSha = undefined; + recordResultSha = undefined; +} diff --git a/lambdas/client-transform-filter-lambda/src/services/payload-signer.ts b/lambdas/https-client-lambda/src/services/payload-signer.ts similarity index 100% rename from lambdas/client-transform-filter-lambda/src/services/payload-signer.ts rename to lambdas/https-client-lambda/src/services/payload-signer.ts index cf69cac8..e2174b76 100644 --- a/lambdas/client-transform-filter-lambda/src/services/payload-signer.ts +++ b/lambdas/https-client-lambda/src/services/payload-signer.ts @@ -2,9 +2,9 @@ import { createHmac } from "node:crypto"; import type { ClientCallbackPayload } from "@nhs-notify-client-callbacks/models"; export function signPayload( - payload: ClientCallbackPayload, applicationId: string, apiKey: string, + payload: ClientCallbackPayload, ): string { return createHmac("sha256", `${applicationId}.${apiKey}`) .update(JSON.stringify(payload)) diff --git a/lambdas/https-client-lambda/src/services/record-result.lua b/lambdas/https-client-lambda/src/services/record-result.lua new file mode 100644 index 00000000..1cc94857 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/record-result.lua @@ -0,0 +1,144 @@ +-- record-result.lua — Records the outcome of a delivery attempt. 
+-- +-- Updates the circuit breaker's error-rate window counters and opens the +-- circuit if the failure rate exceeds the configured threshold. +-- +-- On success: +-- Window counters are left intact. The openedUntil timestamp is preserved +-- while the decay period is still active so that admit.lua can continue +-- computing the linear ramp-up rate. Once the decay period elapses it +-- is zeroed, returning the circuit to a fully clean closed state. +-- +-- On failure: +-- The failure and attempt counters are incremented. A two-window sliding +-- blend is computed before evaluating the trip condition: +-- slidingAttempts = cbAttempts + cbPrevAttempts * prevWeight +-- slidingFailures = cbFailures + cbPrevFailures * prevWeight +-- where prevWeight decays linearly from 1.0 → 0.0 as the current window ages, +-- so previous-window failures fade out gradually rather than dropping off a cliff. +-- The circuit opens when: +-- • the endpoint is not already open (prevents double-tripping and +-- resetting the cooldown timer prematurely), AND +-- • slidingAttempts >= cbMinAttempts (avoids tripping on statistically +-- insignificant data at cold start or just after a window roll), AND +-- • slidingFailures / slidingAttempts exceeds cbErrorThreshold. +-- On open, all counters (current and previous) are reset to zero so the +-- fresh cooldown window begins with a clean slate ready for recovery. +-- +-- Returns: { ok (0|1), state } +-- state: "closed" | "opened" | "failed" + +-- Keys +local cbKey = KEYS[1] -- cb:{endpoint} circuit breaker state hash + +-- Arguments +local now = tonumber(ARGV[1]) or 0 -- current wall-clock time (ms) +local success = tonumber(ARGV[2]) or 0 -- 1 = success, 0 = failure +local cooldownMs = tonumber(ARGV[3]) or 0 -- how long the circuit stays open (ms) +local decayPeriodMs = tonumber(ARGV[4]) or 0 -- ramp-up window after circuit closes (ms) +local cbErrorThreshold = tonumber(ARGV[5]) or 0 -- error-rate fraction that trips the circuit (e.g. 
0.5) +local cbMinAttempts = tonumber(ARGV[6]) or 0 -- minimum samples before the circuit can trip +local cbWindowPeriodMs = tonumber(ARGV[7]) or 0 -- error-rate sliding window duration (ms) + +-- TTL policy: keep circuit breaker state alive for at least the cooldown +-- duration plus a buffer so the decay period remains visible after a close. +local cbTtlSeconds = math.ceil(cooldownMs / 1000) + 60 + +local function refreshCbExpiry() + redis.call("EXPIRE", cbKey, cbTtlSeconds) +end + +-------------------------------------------------------------------------------- +-- LOAD CURRENT STATE +-------------------------------------------------------------------------------- + +local cb = redis.call("HMGET", cbKey, + "opened_until_ms", "cb_window_from", "cb_failures", "cb_attempts", + "cb_prev_failures", "cb_prev_attempts") +local openedUntil = tonumber(cb[1] or "0") +local cbWindowFrom = tonumber(cb[2] or "0") +local cbFailures = tonumber(cb[3] or "0") +local cbAttempts = tonumber(cb[4] or "0") +local cbPrevFailures = tonumber(cb[5] or "0") +local cbPrevAttempts = tonumber(cb[6] or "0") + +-- Every outcome (success or failure) contributes to the error-rate window +cbAttempts = cbAttempts + 1 + +-------------------------------------------------------------------------------- +-- SUCCESS — preserve openedUntil during decay, then zero it +-- +-- admit.lua uses openedUntil to calculate the linear ramp-up rate while the +-- decay period is active. That timestamp must survive in Redis until the +-- decay period ends. Clearing it prematurely would snap the fill rate back +-- to full immediately rather than ramping gradually. 
+-------------------------------------------------------------------------------- + +if success == 1 then + -- Keep openedUntil only if we are still within the decay window + local inDecayWindow = openedUntil > 0 and now > openedUntil and (now - openedUntil) < decayPeriodMs + local preservedOpenedUntil = inDecayWindow and openedUntil or 0 + + redis.call("HSET", cbKey, + "opened_until_ms", preservedOpenedUntil, + "cb_window_from", cbWindowFrom, + "cb_failures", cbFailures, + "cb_attempts", cbAttempts, + "cb_prev_failures", cbPrevFailures, + "cb_prev_attempts", cbPrevAttempts + ) + refreshCbExpiry() + return { 1, "closed" } +end + +-------------------------------------------------------------------------------- +-- FAILURE — increment counter and evaluate whether to open the circuit +-- +-- The trip condition is evaluated against a sliding blend of current and +-- previous window counts, not the raw current-window counts alone. This +-- prevents a burst of failures from escaping detection simply because it +-- straddles a window boundary and gets partially discarded by a reset. +-------------------------------------------------------------------------------- + +cbFailures = cbFailures + 1 + +-- The circuit is already open when openedUntil is set and has not yet elapsed. +-- Guard against double-tripping, which would reset the cooldown timer early. +local circuitAlreadyOpen = openedUntil > 0 and now < openedUntil + +-- Blend current and previous window counts. +-- prevWeight decays linearly from 1.0 → 0.0 as the current window ages, +-- so previous-window failures fade out gradually rather than dropping off a cliff. 
+local windowElapsed = cbWindowFrom > 0 and (now - cbWindowFrom) or 0 +local hasWindow = cbWindowPeriodMs > 0 +local prevWeight = hasWindow and math.max(0, (cbWindowPeriodMs - windowElapsed) / cbWindowPeriodMs) or 0 +local slidingFailures = cbFailures + cbPrevFailures * prevWeight +local slidingAttempts = cbAttempts + cbPrevAttempts * prevWeight + +if not circuitAlreadyOpen + and slidingAttempts >= cbMinAttempts -- enough data to be statistically meaningful + and (slidingFailures / slidingAttempts) > cbErrorThreshold then + -- Trip the circuit — reset all counters so recovery starts from a clean slate + redis.call("HSET", cbKey, + "opened_until_ms", now + cooldownMs, + "cb_window_from", 0, + "cb_failures", 0, + "cb_attempts", 0, + "cb_prev_failures", 0, + "cb_prev_attempts", 0 + ) + refreshCbExpiry() + return { 0, "opened" } +end + +-- Below the threshold — record the failure but keep the circuit closed +redis.call("HSET", cbKey, + "opened_until_ms", openedUntil, + "cb_window_from", cbWindowFrom, + "cb_failures", cbFailures, + "cb_attempts", cbAttempts, + "cb_prev_failures", cbPrevFailures, + "cb_prev_attempts", cbPrevAttempts +) +refreshCbExpiry() +return { 0, "failed" } diff --git a/lambdas/https-client-lambda/src/services/redis-client.ts b/lambdas/https-client-lambda/src/services/redis-client.ts new file mode 100644 index 00000000..7d8785c8 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/redis-client.ts @@ -0,0 +1,108 @@ +import { type RedisClientType, createClient } from "@redis/client"; +import { SignatureV4 } from "@smithy/signature-v4"; +import { Sha256 } from "@aws-crypto/sha256-js"; +import { fromNodeProviderChain } from "@aws-sdk/credential-providers"; +import { logger } from "@nhs-notify-client-callbacks/logger"; + +const TOKEN_EXPIRY_SECONDS = 900; +const TOKEN_REFRESH_BUFFER_SECONDS = 60; + +let redisClient: RedisClientType | undefined; +let tokenExpiry = 0; + +async function generateElastiCacheIamToken(): Promise { + const cacheName = 
process.env.ELASTICACHE_CACHE_NAME; + const endpoint = process.env.ELASTICACHE_ENDPOINT; + const username = process.env.ELASTICACHE_IAM_USERNAME; + + if (!cacheName || !endpoint || !username) { + throw new Error( + "ELASTICACHE_CACHE_NAME, ELASTICACHE_ENDPOINT, and ELASTICACHE_IAM_USERNAME are required", + ); + } + + const region = process.env.AWS_REGION ?? "eu-west-2"; + + const signer = new SignatureV4({ + credentials: fromNodeProviderChain(), + region, + service: "elasticache", + sha256: Sha256, + }); + + const signed = await signer.presign( + { + protocol: "https:", + method: "GET", + hostname: cacheName, + path: "/", + query: { Action: "connect", User: username }, + headers: { host: cacheName }, + }, + { expiresIn: TOKEN_EXPIRY_SECONDS }, + ); + + tokenExpiry = Date.now() + TOKEN_EXPIRY_SECONDS * 1000; + + logger.info("ElastiCache IAM token generated", { + cacheName, + username, + region, + signingAlgorithm: signed.query?.["X-Amz-Algorithm"], + tokenExpiresAt: new Date(tokenExpiry).toISOString(), + }); + + const qs = new URLSearchParams( + signed.query as Record, + ).toString(); + return `${cacheName}/?${qs}`; +} + +export async function getRedisClient(): Promise { + const isTokenValid = + tokenExpiry > Date.now() + TOKEN_REFRESH_BUFFER_SECONDS * 1000; + + if (redisClient?.isOpen && isTokenValid) { + logger.info("Reusing existing Redis client"); + return redisClient; + } + + const endpoint = process.env.ELASTICACHE_ENDPOINT; + if (!endpoint) { + throw new Error("ELASTICACHE_ENDPOINT is required"); + } + + const username = process.env.ELASTICACHE_IAM_USERNAME; + if (!username) { + throw new Error("ELASTICACHE_IAM_USERNAME is required"); + } + + if (redisClient?.isOpen) { + logger.info("Disconnecting Redis client for token refresh"); + await redisClient.disconnect(); + } + + const token = await generateElastiCacheIamToken(); + + logger.info("Connecting to ElastiCache", { endpoint, username }); + + redisClient = createClient({ + url: `rediss://${endpoint}:6379`, 
+ username, + password: token, + }); + + redisClient.on("error", (err) => { + logger.error("Redis connection error", { error: String(err) }); + }); + + await redisClient.connect(); + return redisClient; +} + +export function resetRedisClient(): void { + redisClient = undefined; + tokenExpiry = 0; +} + +export { type RedisClientType } from "@redis/client"; diff --git a/lambdas/https-client-lambda/src/services/sqs-visibility.ts b/lambdas/https-client-lambda/src/services/sqs-visibility.ts new file mode 100644 index 00000000..e6fe2720 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/sqs-visibility.ts @@ -0,0 +1,21 @@ +import { ChangeMessageVisibilityCommand, SQSClient } from "@aws-sdk/client-sqs"; + +const sqsClient = new SQSClient({}); + +export async function changeVisibility( + receiptHandle: string, + visibilityTimeoutSeconds: number, +): Promise { + const { QUEUE_URL } = process.env; + if (!QUEUE_URL) { + throw new Error("QUEUE_URL is required"); + } + + await sqsClient.send( + new ChangeMessageVisibilityCommand({ + QueueUrl: QUEUE_URL, + ReceiptHandle: receiptHandle, + VisibilityTimeout: Math.floor(visibilityTimeoutSeconds), + }), + ); +} diff --git a/lambdas/https-client-lambda/src/services/ssm-applications-map.ts b/lambdas/https-client-lambda/src/services/ssm-applications-map.ts new file mode 100644 index 00000000..999c23d9 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/ssm-applications-map.ts @@ -0,0 +1,69 @@ +import { GetParameterCommand, SSMClient } from "@aws-sdk/client-ssm"; +import { logger } from "@nhs-notify-client-callbacks/logger"; + +const ssmClient = new SSMClient({}); + +const DEFAULT_CACHE_TTL_MS = 300_000; // 5 minutes + +let cachedMap: Map | undefined; +let cacheExpiresAt = 0; + +async function loadMap(): Promise> { + if (cachedMap && Date.now() < cacheExpiresAt) { + return cachedMap; + } + + const { APPLICATIONS_MAP_PARAMETER } = process.env; + if (!APPLICATIONS_MAP_PARAMETER) { + throw new 
Error("APPLICATIONS_MAP_PARAMETER is required"); + } + + const response = await ssmClient.send( + new GetParameterCommand({ + Name: APPLICATIONS_MAP_PARAMETER, + WithDecryption: true, + }), + ); + + if (!response.Parameter?.Value) { + throw new Error( + `SSM parameter '${APPLICATIONS_MAP_PARAMETER}' not found or has no value`, + ); + } + + let parsed: Record; + try { + parsed = JSON.parse(response.Parameter.Value) as Record; + } catch { + throw new Error( + `SSM parameter '${APPLICATIONS_MAP_PARAMETER}' contains invalid JSON`, + ); + } + + cachedMap = new Map(Object.entries(parsed)); + const ttlMs = + Number(process.env.APPLICATIONS_MAP_CACHE_TTL_MS) || DEFAULT_CACHE_TTL_MS; + cacheExpiresAt = Date.now() + ttlMs; + logger.info("Applications map loaded from SSM", { + parameterName: APPLICATIONS_MAP_PARAMETER, + }); + return cachedMap; +} + +export async function getApplicationId(clientId: string): Promise { + const map = await loadMap(); + const applicationId = map.get(clientId); + + if (!applicationId) { + throw new Error( + `No applicationId found for clientId '${clientId}' in SSM map`, + ); + } + + return applicationId; +} + +export function resetCache(): void { + cachedMap = undefined; + cacheExpiresAt = 0; +} diff --git a/lambdas/https-client-lambda/src/services/visibility-managed-error.ts b/lambdas/https-client-lambda/src/services/visibility-managed-error.ts new file mode 100644 index 00000000..403c2162 --- /dev/null +++ b/lambdas/https-client-lambda/src/services/visibility-managed-error.ts @@ -0,0 +1 @@ +export class VisibilityManagedError extends Error {} diff --git a/tests/performance/tsconfig.json b/lambdas/https-client-lambda/tsconfig.json similarity index 60% rename from tests/performance/tsconfig.json rename to lambdas/https-client-lambda/tsconfig.json index 2cc7bdfa..a50e6fc0 100644 --- a/tests/performance/tsconfig.json +++ b/lambdas/https-client-lambda/tsconfig.json @@ -2,16 +2,13 @@ "compilerOptions": { "isolatedModules": true, "paths": { - 
"helpers": [ - "./helpers/index" + "*": [ + "./src/*" ] } }, - "exclude": [ - "jest.config.ts" - ], "extends": "../../tsconfig.base.json", "include": [ - "**/*.ts" + "src/**/*" ] } diff --git a/lambdas/mock-webhook-lambda/src/__tests__/index.test.ts b/lambdas/mock-webhook-lambda/src/__tests__/index.test.ts index 6f3cc917..d7463722 100644 --- a/lambdas/mock-webhook-lambda/src/__tests__/index.test.ts +++ b/lambdas/mock-webhook-lambda/src/__tests__/index.test.ts @@ -1,6 +1,14 @@ +import { X509Certificate } from "node:crypto"; import type { APIGatewayProxyEvent } from "aws-lambda"; import { handler } from "index"; +jest.mock("node:crypto", () => ({ + ...jest.requireActual("node:crypto"), + X509Certificate: jest.fn(), +})); + +const mockX509Certificate = X509Certificate as unknown as jest.Mock; + const TEST_API_KEY = "test-api-key"; jest.mock("@nhs-notify-client-callbacks/logger", () => { @@ -32,6 +40,28 @@ const createMockEvent = ( ): APIGatewayProxyEvent => ({ body, headers, rawPath }) as unknown as APIGatewayProxyEvent; +const createAlbEvent = ( + body: string | null, + headers: Record = DEFAULT_HEADERS, + extraHeaders: Record = {}, +): APIGatewayProxyEvent => + ({ + body, + path: "/target-abc", + httpMethod: "POST", + headers: { ...headers, ...extraHeaders }, + requestContext: { + elb: { + targetGroupArn: + "arn:aws:elasticloadbalancing:eu-west-2:123456789012:targetgroup/mock/abc", + }, + }, + }) as unknown as APIGatewayProxyEvent; + +const FAKE_CERT_HEADER = encodeURIComponent( + "-----BEGIN CERTIFICATE-----\nZmFrZQ==\n-----END CERTIFICATE-----", +); + describe("Mock Webhook Lambda", () => { beforeAll(() => { process.env.API_KEY = TEST_API_KEY; @@ -381,3 +411,144 @@ describe("Mock Webhook Lambda", () => { }); }); }); + +describe("ALB mTLS certificate logging", () => { + beforeAll(() => { + process.env.API_KEY = TEST_API_KEY; + }); + + afterAll(() => { + delete process.env.API_KEY; + }); + + beforeEach(() => { + mockX509Certificate.mockReset(); + 
mockX509Certificate.mockImplementation(() => ({ + validFrom: new Date(Date.now() - 86_400_000).toString(), + validTo: new Date(Date.now() + 86_400_000).toString(), + })); + }); + + it("logs isMtls=false and proceeds when ALB invocation has no client certificate header", async () => { + const event = createAlbEvent(JSON.stringify({ data: [] })); + const result = await handler(event); + + expect(result.statusCode).not.toBe(401); + expect(mockLogger.info).toHaveBeenCalledWith( + "Mock webhook invoked without mTLS", + expect.objectContaining({ isMtls: false }), + ); + }); + + it("logs isMtls=false and proceeds when client certificate header cannot be parsed", async () => { + mockX509Certificate.mockImplementationOnce(() => { + throw new Error("Invalid certificate"); + }); + const event = createAlbEvent( + JSON.stringify({ data: [] }), + DEFAULT_HEADERS, + { "x-amzn-mtls-clientcert": FAKE_CERT_HEADER }, + ); + const result = await handler(event); + + expect(result.statusCode).not.toBe(401); + expect(mockLogger.info).toHaveBeenCalledWith( + "Mock webhook invoked without mTLS", + expect.objectContaining({ isMtls: false }), + ); + }); + + it("logs isMtls=false and proceeds when client certificate is expired", async () => { + mockX509Certificate.mockImplementationOnce(() => ({ + validFrom: new Date(Date.now() - 172_800_000).toString(), + validTo: new Date(Date.now() - 86_400_000).toString(), + })); + const event = createAlbEvent( + JSON.stringify({ data: [] }), + DEFAULT_HEADERS, + { "x-amzn-mtls-clientcert": FAKE_CERT_HEADER }, + ); + const result = await handler(event); + + expect(result.statusCode).not.toBe(401); + expect(mockLogger.info).toHaveBeenCalledWith( + "Mock webhook invoked without mTLS", + expect.objectContaining({ isMtls: false }), + ); + }); + + it("logs isMtls=true and proceeds when certificate is valid", async () => { + const event = createAlbEvent( + JSON.stringify({ data: [] }), + { "x-api-key": "wrong-key" }, + { "x-amzn-mtls-clientcert": 
FAKE_CERT_HEADER }, + ); + const result = await handler(event); + + expect(mockLogger.info).toHaveBeenCalledWith( + "mTLS client certificate verified", + expect.objectContaining({ isMtls: true }), + ); + expect(result.statusCode).toBe(401); + const body = JSON.parse(result.body); + expect(body.message).toBe("Unauthorized"); + }); + + it("processes request successfully when certificate is valid and API key is correct", async () => { + const callback = { + data: [ + { + type: "MessageStatus", + attributes: { + messageId: "msg-alb-mtls", + messageReference: "ref-alb", + messageStatus: "delivered", + timestamp: "2026-01-01T00:00:00Z", + }, + links: { message: "some-link" }, + meta: { idempotencyKey: "idem-key-alb" }, + }, + ], + }; + const event = createAlbEvent(JSON.stringify(callback), DEFAULT_HEADERS, { + "x-amzn-mtls-clientcert": FAKE_CERT_HEADER, + }); + const result = await handler(event); + + expect(result.statusCode).toBe(200); + const body = JSON.parse(result.body); + expect(body.message).toBe("Callback received"); + }); + + it("processes non-mTLS ALB request successfully when API key is correct", async () => { + const callback = { + data: [ + { + type: "MessageStatus", + attributes: { + messageId: "msg-alb-no-mtls", + messageReference: "ref-alb", + messageStatus: "delivered", + timestamp: "2026-01-01T00:00:00Z", + }, + links: { message: "some-link" }, + meta: { idempotencyKey: "idem-key-alb-no-mtls" }, + }, + ], + }; + const event = createAlbEvent(JSON.stringify(callback), DEFAULT_HEADERS); + const result = await handler(event); + + expect(result.statusCode).toBe(200); + const body = JSON.parse(result.body); + expect(body.message).toBe("Callback received"); + }); + + it("non-ALB invocations skip certificate check", async () => { + const event = createMockEvent(JSON.stringify({ data: [] })); + const result = await handler(event); + + const body = JSON.parse(result.body); + expect(body.message).not.toBe("Mutual TLS authentication required"); + }); +}); diff 
--git a/lambdas/mock-webhook-lambda/src/index.ts b/lambdas/mock-webhook-lambda/src/index.ts index 081ef3b9..d0bf582d 100644 --- a/lambdas/mock-webhook-lambda/src/index.ts +++ b/lambdas/mock-webhook-lambda/src/index.ts @@ -1,9 +1,33 @@ +import { X509Certificate } from "node:crypto"; import type { APIGatewayProxyEvent, APIGatewayProxyResult } from "aws-lambda"; import { Logger } from "@nhs-notify-client-callbacks/logger"; import type { ClientCallbackPayload } from "@nhs-notify-client-callbacks/models"; const logger = new Logger(); +function verifyClientCertificate(certHeader: string | undefined): { + valid: boolean; + reason?: string; +} { + if (!certHeader) { + return { valid: false, reason: "No client certificate provided" }; + } + try { + const pem = decodeURIComponent(certHeader); + const cert = new X509Certificate(pem); + const now = new Date(); + if (now < new Date(cert.validFrom) || now > new Date(cert.validTo)) { + return { + valid: false, + reason: "Client certificate is not within its validity period", + }; + } + return { valid: true }; + } catch { + return { valid: false, reason: "Failed to parse client certificate" }; + } +} + function isClientCallbackPayload( value: unknown, ): value is ClientCallbackPayload { @@ -36,20 +60,47 @@ function isClientCallbackPayload( async function buildResponse( event: APIGatewayProxyEvent, ): Promise { - const eventWithFunctionUrlFields = event as APIGatewayProxyEvent & { + const eventWithContextFields = event as APIGatewayProxyEvent & { rawPath?: string; - requestContext?: { http?: { method?: string } }; + requestContext?: { + http?: { method?: string }; + elb?: { targetGroupArn: string }; + }; }; const headers = Object.fromEntries( Object.entries(event.headers).map(([k, v]) => [String(k).toLowerCase(), v]), ) as Record; - const path = event.path ?? eventWithFunctionUrlFields.rawPath; + const path = event.path ?? 
eventWithContextFields.rawPath; + + const isAlbInvocation = Boolean(eventWithContextFields.requestContext?.elb); + const clientCertPresent = Boolean(headers["x-amzn-mtls-clientcert"]); + let isMtls = false; + if (isAlbInvocation) { + const certResult = verifyClientCertificate( + headers["x-amzn-mtls-clientcert"], + ); + isMtls = certResult.valid; + if (isMtls) { + logger.info("mTLS client certificate verified", { + fingerprint: headers["x-amzn-mtls-clientcert-fingerprint"] ?? "", + isMtls: true, + }); + } else { + logger.info("Mock webhook invoked without mTLS", { + isMtls: false, + clientCertPresent, + reason: certResult.reason, + }); + } + } logger.info("Mock webhook invoked", { path, method: event.httpMethod, hasBody: Boolean(event.body), + isMtls, + clientCertPresent, "x-api-key": headers["x-api-key"], "x-hmac-sha256-signature": headers["x-hmac-sha256-signature"], payload: event.body, @@ -124,6 +175,7 @@ async function buildResponse( messageId, callbackType: item.type, path, + isMtls, apiKey: providedApiKey, signature: headers["x-hmac-sha256-signature"] ?? 
"", payload: JSON.stringify(item), diff --git a/lambdas/perf-runner-lambda/jest.config.ts b/lambdas/perf-runner-lambda/jest.config.ts new file mode 100644 index 00000000..218d8ffd --- /dev/null +++ b/lambdas/perf-runner-lambda/jest.config.ts @@ -0,0 +1,13 @@ +import { nodeJestConfig } from "../../jest.config.base.ts"; + +export default { + ...nodeJestConfig, + coverageThreshold: { + global: { + ...nodeJestConfig.coverageThreshold?.global, + branches: 100, + lines: 100, + statements: 100, + }, + }, +}; diff --git a/tests/performance/package.json b/lambdas/perf-runner-lambda/package.json similarity index 60% rename from tests/performance/package.json rename to lambdas/perf-runner-lambda/package.json index 5e2f6c2f..9f9d01d8 100644 --- a/tests/performance/package.json +++ b/lambdas/perf-runner-lambda/package.json @@ -1,30 +1,32 @@ { + "name": "nhs-notify-perf-runner-lambda", + "version": "0.0.1", + "private": true, "engines": { "node": ">=24.14.1" }, - "name": "nhs-notify-client-callbacks-performance-tests", - "version": "0.0.1", - "private": true, "scripts": { - "test:performance": "jest", - "test:unit": "echo 'No unit tests in performance workspace - skipping'", + "lambda-build": "rm -rf dist && pnpm exec esbuild --bundle --minify --sourcemap --target=es2020 --platform=node --entry-names=[name] --outdir=dist src/index.ts", "lint": "eslint .", "lint:fix": "eslint . 
--fix", + "test:unit": "jest", "typecheck": "tsc --noEmit" }, "dependencies": { "@aws-sdk/client-cloudwatch-logs": "catalog:aws", "@aws-sdk/client-sqs": "catalog:aws", + "@nhs-notify-client-callbacks/logger": "workspace:*", "@nhs-notify-client-callbacks/models": "workspace:*", - "@nhs-notify-client-callbacks/test-support": "workspace:*", - "async-wait-until": "catalog:app" + "esbuild": "catalog:tools" }, "devDependencies": { "@tsconfig/node22": "catalog:tools", + "@types/aws-lambda": "catalog:tools", "@types/jest": "catalog:test", "@types/node": "catalog:tools", "eslint": "catalog:lint", "jest": "catalog:test", + "ts-jest": "catalog:test", "typescript": "catalog:tools" } } diff --git a/lambdas/perf-runner-lambda/src/__tests__/cloudwatch.test.ts b/lambdas/perf-runner-lambda/src/__tests__/cloudwatch.test.ts new file mode 100644 index 00000000..055ac7bc --- /dev/null +++ b/lambdas/perf-runner-lambda/src/__tests__/cloudwatch.test.ts @@ -0,0 +1,287 @@ +import type { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; +import { queryDeliveryMetricsSnapshot, queryMetricsSnapshot } from "cloudwatch"; + +const mockCloudWatchClient = { + send: jest.fn(), +} as unknown as jest.Mocked; + +beforeEach(() => { + jest.useFakeTimers(); +}); + +afterEach(() => { + jest.useRealTimers(); +}); + +describe("queryMetricsSnapshot", () => { + it("returns null when StartQuery returns no queryId", async () => { + mockCloudWatchClient.send.mockResolvedValueOnce({} as never); + + const result = await queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/nhs-dev-callbacks-client-transform-filter", + 1_700_000_000, + 1_700_000_060, + ); + + expect(result).toBeNull(); + }); + + it("returns null when the query status is Failed", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-1" } as never) + .mockResolvedValueOnce({ status: "Failed" } as never); + + const promise = queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/test", + 0, + 60, + 
); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toBeNull(); + }); + + it("returns null when the query status is Cancelled", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-2" } as never) + .mockResolvedValueOnce({ status: "Cancelled" } as never); + + const promise = queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/test", + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toBeNull(); + }); + + it("returns a snapshot with zeroed metrics when the result row is empty", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-3" } as never) + .mockResolvedValueOnce({ status: "Complete", results: [] } as never); + + const promise = queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/test", + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toMatchObject({ p50Ms: 0, p95Ms: 0, p99Ms: 0, count: 0 }); + expect(result?.snapshotAt).toBeGreaterThan(0); + }); + + it("returns a populated snapshot when query completes successfully", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-4" } as never) + .mockResolvedValueOnce({ + status: "Complete", + results: [ + [ + { field: "eventCount", value: "500" }, + { field: "p50", value: "42" }, + { field: "p95", value: "120" }, + { field: "p99", value: "250" }, + ], + ], + } as never); + + const promise = queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/test", + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toMatchObject({ + count: 500, + p50Ms: 42, + p95Ms: 120, + p99Ms: 250, + }); + }); + + it("polls until the query becomes Complete", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-5" } as never) + .mockResolvedValueOnce({ status: "Running" } as never) + .mockResolvedValueOnce({ 
status: "Running" } as never) + .mockResolvedValueOnce({ + status: "Complete", + results: [[{ field: "eventCount", value: "10" }]], + } as never); + + const promise = queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/test", + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result?.count).toBe(10); + expect(mockCloudWatchClient.send).toHaveBeenCalledTimes(4); + }); + + it("returns null when the query does not complete within the timeout", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-6" } as never) + .mockResolvedValue({ status: "Running" } as never); + + const promise = queryMetricsSnapshot( + mockCloudWatchClient, + "/aws/lambda/test", + 0, + 60, + ); + + await jest.advanceTimersByTimeAsync(60_000); + const result = await promise; + + expect(result).toBeNull(); + }); +}); + +describe("queryDeliveryMetricsSnapshot", () => { + it("returns null when logGroupNames is empty", async () => { + const result = await queryDeliveryMetricsSnapshot( + mockCloudWatchClient, + [], + 0, + 60, + ); + + expect(result).toBeNull(); + expect(mockCloudWatchClient.send).not.toHaveBeenCalled(); + }); + + it("returns null when StartQuery returns no queryId", async () => { + mockCloudWatchClient.send.mockResolvedValueOnce({} as never); + + const result = await queryDeliveryMetricsSnapshot( + mockCloudWatchClient, + ["/aws/lambda/test-https-client-perf-client-1"], + 0, + 60, + ); + + expect(result).toBeNull(); + }); + + it("sends logGroupNames to StartQuery", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-d1" } as never) + .mockResolvedValueOnce({ status: "Complete", results: [] } as never); + + const logGroups = [ + "/aws/lambda/test-https-client-perf-client-1", + "/aws/lambda/test-https-client-perf-client-2", + ]; + + const promise = queryDeliveryMetricsSnapshot( + mockCloudWatchClient, + logGroups, + 0, + 60, + ); + + await jest.runAllTimersAsync(); + await 
promise; + + const startCmd = mockCloudWatchClient.send.mock.calls[0][0] as { + input: { logGroupNames: string[] }; + }; + expect(startCmd.input.logGroupNames).toEqual(logGroups); + }); + + it("returns a snapshot with zeroed metrics when the result row is empty", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-d2" } as never) + .mockResolvedValueOnce({ status: "Complete", results: [] } as never); + + const promise = queryDeliveryMetricsSnapshot( + mockCloudWatchClient, + ["/aws/lambda/test-https-client-perf-client-1"], + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toMatchObject({ + deliveryCount: 0, + p50Ms: 0, + p95Ms: 0, + p99Ms: 0, + }); + expect(result?.snapshotAt).toBeGreaterThan(0); + }); + + it("returns a populated snapshot when query completes successfully", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-d3" } as never) + .mockResolvedValueOnce({ + status: "Complete", + results: [ + [ + { field: "deliveryCount", value: "200" }, + { field: "p50", value: "85" }, + { field: "p95", value: "250" }, + { field: "p99", value: "450" }, + ], + ], + } as never); + + const promise = queryDeliveryMetricsSnapshot( + mockCloudWatchClient, + ["/aws/lambda/test-https-client-perf-client-1"], + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toMatchObject({ + deliveryCount: 200, + p50Ms: 85, + p95Ms: 250, + p99Ms: 450, + }); + }); + + it("returns null when the query status is Failed", async () => { + mockCloudWatchClient.send + .mockResolvedValueOnce({ queryId: "qid-d4" } as never) + .mockResolvedValueOnce({ status: "Failed" } as never); + + const promise = queryDeliveryMetricsSnapshot( + mockCloudWatchClient, + ["/aws/lambda/test-https-client-perf-client-1"], + 0, + 60, + ); + + await jest.runAllTimersAsync(); + const result = await promise; + + expect(result).toBeNull(); + }); +}); diff --git 
a/lambdas/perf-runner-lambda/src/__tests__/event-factories.test.ts b/lambdas/perf-runner-lambda/src/__tests__/event-factories.test.ts new file mode 100644 index 00000000..1c877a17 --- /dev/null +++ b/lambdas/perf-runner-lambda/src/__tests__/event-factories.test.ts @@ -0,0 +1,68 @@ +import { EventTypes } from "@nhs-notify-client-callbacks/models"; +import { + createChannelStatusEvent, + createEvent, + createMessageStatusEvent, +} from "event-factories"; + +describe("createMessageStatusEvent", () => { + it("creates a valid message status CloudEvent with the given clientId and status", () => { + const event = createMessageStatusEvent("perf-client-1", "DELIVERED"); + + expect(event.specversion).toBe("1.0"); + expect(event.type).toBe(EventTypes.MESSAGE_STATUS_PUBLISHED); + expect(event.datacontenttype).toBe("application/json"); + expect(event.data.clientId).toBe("perf-client-1"); + expect(event.data.messageStatus).toBe("DELIVERED"); + expect(event.data.messageId).toBeTruthy(); + expect(event.id).toBeTruthy(); + }); + + it("assigns a unique id and messageId on each call", () => { + const a = createMessageStatusEvent("perf-client-1", "FAILED"); + const b = createMessageStatusEvent("perf-client-1", "FAILED"); + + expect(a.id).not.toBe(b.id); + expect(a.data.messageId).not.toBe(b.data.messageId); + }); +}); + +describe("createChannelStatusEvent", () => { + it("creates a valid channel status CloudEvent with the given clientId and status", () => { + const event = createChannelStatusEvent("perf-client-2", "DELIVERED"); + + expect(event.specversion).toBe("1.0"); + expect(event.type).toBe(EventTypes.CHANNEL_STATUS_PUBLISHED); + expect(event.datacontenttype).toBe("application/json"); + expect(event.data.clientId).toBe("perf-client-2"); + expect(event.data.channelStatus).toBe("DELIVERED"); + expect(event.data.messageId).toBeTruthy(); + expect(event.id).toBeTruthy(); + }); +}); + +describe("createEvent", () => { + it("delegates to createMessageStatusEvent for messageStatus factory 
entries", () => { + const event = createEvent({ + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "SENDING", + }); + + expect(event.type).toBe(EventTypes.MESSAGE_STATUS_PUBLISHED); + expect(event.data.clientId).toBe("perf-client-1"); + }); + + it("delegates to createChannelStatusEvent for channelStatus factory entries", () => { + const event = createEvent({ + weight: 1, + factory: "channelStatus", + clientId: "perf-client-2", + channelStatus: "FAILED", + }); + + expect(event.type).toBe(EventTypes.CHANNEL_STATUS_PUBLISHED); + expect(event.data.clientId).toBe("perf-client-2"); + }); +}); diff --git a/lambdas/perf-runner-lambda/src/__tests__/index.test.ts b/lambdas/perf-runner-lambda/src/__tests__/index.test.ts new file mode 100644 index 00000000..1d1a501a --- /dev/null +++ b/lambdas/perf-runner-lambda/src/__tests__/index.test.ts @@ -0,0 +1,122 @@ +import { handler } from "index"; +import type { PerformanceResult } from "types"; +import { DEFAULT_SCENARIO } from "scenario"; + +import { runPerformanceTest } from "runner"; + +jest.mock("@aws-sdk/client-sqs", () => ({ + SQSClient: jest.fn(() => ({ destroy: jest.fn() })), +})); + +jest.mock("@aws-sdk/client-cloudwatch-logs", () => ({ + CloudWatchLogsClient: jest.fn(() => ({ destroy: jest.fn() })), +})); + +jest.mock("runner"); +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + Logger: jest.fn(() => ({ + info: jest.fn(), + error: jest.fn(), + })), +})); + +const mockRunPerformanceTest = runPerformanceTest as jest.MockedFunction< + typeof runPerformanceTest +>; + +const mockResult: PerformanceResult = { + testId: "test-id", + scenario: DEFAULT_SCENARIO, + startedAt: "2026-04-09T10:00:00.000Z", + completedAt: "2026-04-09T10:02:00.000Z", + phases: [], + metrics: [], + deliveryMetrics: [], +}; + +beforeEach(() => { + jest.clearAllMocks(); + mockRunPerformanceTest.mockResolvedValue(mockResult); + process.env.INBOUND_QUEUE_URL = "https://sqs.example.invalid/queue"; + 
process.env.TRANSFORM_FILTER_LOG_GROUP = + "/aws/lambda/nhs-dev-callbacks-client-transform-filter"; + process.env.DELIVERY_LOG_GROUP_PREFIX = + "/aws/lambda/nhs-dev-callbacks-https-client-"; + process.env.AWS_REGION = "eu-west-2"; +}); + +describe("handler", () => { + it("calls runPerformanceTest with the provided testId and scenario", async () => { + const result = await handler({ testId: "test-id" }); + + expect(result).toEqual(mockResult); + expect(mockRunPerformanceTest).toHaveBeenCalledWith( + expect.objectContaining({ + queueUrl: "https://sqs.example.invalid/queue", + logGroupName: "/aws/lambda/nhs-dev-callbacks-client-transform-filter", + deliveryLogGroupPrefix: "/aws/lambda/nhs-dev-callbacks-https-client-", + }), + DEFAULT_SCENARIO, + "test-id", + ); + }); + + it("uses a custom scenario when one is provided in the event", async () => { + const customScenario = { + ...DEFAULT_SCENARIO, + phases: [{ durationSecs: 5, targetEps: 500 }], + }; + + await handler({ testId: "custom-test", scenario: customScenario }); + + expect(mockRunPerformanceTest).toHaveBeenCalledWith( + expect.anything(), + customScenario, + "custom-test", + ); + }); + + it("destroys AWS clients even when runPerformanceTest throws", async () => { + const { SQSClient } = jest.requireMock("@aws-sdk/client-sqs"); + const mockDestroy = jest.fn(); + SQSClient.mockReturnValue({ destroy: mockDestroy }); + + mockRunPerformanceTest.mockRejectedValue(new Error("test failure")); + + await expect(handler({ testId: "failing-test" })).rejects.toThrow( + "test failure", + ); + expect(mockDestroy).toHaveBeenCalled(); + }); + + it("throws when INBOUND_QUEUE_URL is missing", async () => { + delete process.env.INBOUND_QUEUE_URL; + + await expect(handler({ testId: "missing-queue-test" })).rejects.toThrow( + "Missing required environment variable: INBOUND_QUEUE_URL", + ); + }); + + it("throws when TRANSFORM_FILTER_LOG_GROUP is missing", async () => { + delete process.env.TRANSFORM_FILTER_LOG_GROUP; + delete 
process.env.AWS_REGION; + + await expect(handler({ testId: "missing-log-group-test" })).rejects.toThrow( + "Missing required environment variable: TRANSFORM_FILTER_LOG_GROUP", + ); + }); + + it("passes undefined deliveryLogGroupPrefix when env var is not set", async () => { + delete process.env.DELIVERY_LOG_GROUP_PREFIX; + + await handler({ testId: "no-prefix-test" }); + + expect(mockRunPerformanceTest).toHaveBeenCalledWith( + expect.objectContaining({ + deliveryLogGroupPrefix: undefined, + }), + DEFAULT_SCENARIO, + "no-prefix-test", + ); + }); +}); diff --git a/lambdas/perf-runner-lambda/src/__tests__/runner.test.ts b/lambdas/perf-runner-lambda/src/__tests__/runner.test.ts new file mode 100644 index 00000000..1cf5f3a3 --- /dev/null +++ b/lambdas/perf-runner-lambda/src/__tests__/runner.test.ts @@ -0,0 +1,323 @@ +import type { SQSClient } from "@aws-sdk/client-sqs"; +import type { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; +import type { + DeliveryMetricsSnapshot, + MetricsSnapshot, + PhaseResult, + RunnerDeps, + Scenario, +} from "types"; +import { defaultSleep, runPerformanceTest } from "runner"; + +import { generatePhaseLoad } from "sqs"; +import { queryDeliveryMetricsSnapshot, queryMetricsSnapshot } from "cloudwatch"; + +jest.mock("sqs"); +jest.mock("cloudwatch"); + +const mockGeneratePhaseLoad = jest.mocked(generatePhaseLoad); +const mockQueryMetricsSnapshot = jest.mocked(queryMetricsSnapshot); +const mockQueryDeliveryMetricsSnapshot = jest.mocked( + queryDeliveryMetricsSnapshot, +); + +const immediateSleep = jest.fn().mockResolvedValue(undefined); + +const mockPhaseResult: PhaseResult = { + targetEps: 1000, + achievedEps: 980, + sent: 1000, + durationMs: 1020, +}; + +const mockSnapshot: MetricsSnapshot = { + snapshotAt: Date.now(), + p50Ms: 30, + p95Ms: 80, + p99Ms: 150, + count: 100, +}; + +const mockDeliverySnapshot: DeliveryMetricsSnapshot = { + snapshotAt: Date.now(), + deliveryCount: 50, + p50Ms: 120, + p95Ms: 300, + p99Ms: 500, +}; + 
+const scenario: Scenario = { + phases: [{ durationSecs: 1, targetEps: 1000 }], + eventMix: [ + { + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "DELIVERED", + }, + ], + metricsIntervalSecs: 1, +}; + +const deps: RunnerDeps = { + sqsClient: {} as SQSClient, + cloudWatchClient: {} as CloudWatchLogsClient, + queueUrl: "https://sqs.example.invalid/queue", + logGroupName: "/aws/lambda/nhs-dev-callbacks-client-transform-filter", + deliveryLogGroupPrefix: "/aws/lambda/nhs-dev-callbacks-https-client-", +}; + +beforeEach(() => { + jest.clearAllMocks(); + mockGeneratePhaseLoad.mockResolvedValue(mockPhaseResult); + mockQueryDeliveryMetricsSnapshot.mockResolvedValue(null); + immediateSleep.mockResolvedValue(undefined); +}); + +describe("runPerformanceTest", () => { + it("returns a PerformanceResult with phase results and snapshots from polling and final query", async () => { + mockQueryMetricsSnapshot.mockResolvedValue(mockSnapshot); + mockQueryDeliveryMetricsSnapshot.mockResolvedValue(mockDeliverySnapshot); + + const result = await runPerformanceTest( + deps, + scenario, + "test-id-1", + immediateSleep, + ); + + expect(result.testId).toBe("test-id-1"); + expect(result.scenario).toBe(scenario); + expect(result.phases).toHaveLength(1); + expect(result.phases[0]).toEqual(mockPhaseResult); + expect(result.metrics).toHaveLength(2); // one mid-test, one final + expect(result.deliveryMetrics).toHaveLength(2); // one mid-test, one final + expect(result.startedAt).toBeTruthy(); + expect(result.completedAt).toBeTruthy(); + }); + + it("excludes null snapshots from the metrics array", async () => { + mockQueryMetricsSnapshot + .mockResolvedValueOnce(null) // mid-test poll returns null + .mockResolvedValueOnce(mockSnapshot); // final query returns snapshot + + const result = await runPerformanceTest( + deps, + scenario, + "test-id-2", + immediateSleep, + ); + + expect(result.metrics).toHaveLength(1); + 
expect(result.metrics[0]).toEqual(mockSnapshot); + expect(result.deliveryMetrics).toHaveLength(0); + }); + + it("produces an empty metrics array when all queries return null", async () => { + mockQueryMetricsSnapshot.mockResolvedValue(null); + + const result = await runPerformanceTest( + deps, + scenario, + "test-id-3", + immediateSleep, + ); + + expect(result.metrics).toHaveLength(0); + expect(result.deliveryMetrics).toHaveLength(0); + }); + + it("runs all phases and collects each result", async () => { + const multiPhaseScenario: Scenario = { + ...scenario, + phases: [ + { durationSecs: 1, targetEps: 500 }, + { durationSecs: 1, targetEps: 1000 }, + ], + }; + + const phase1Result = { ...mockPhaseResult, targetEps: 500 }; + const phase2Result = { ...mockPhaseResult, targetEps: 1000 }; + + mockGeneratePhaseLoad + .mockResolvedValueOnce(phase1Result) + .mockResolvedValueOnce(phase2Result); + mockQueryMetricsSnapshot.mockResolvedValue(null); + + const result = await runPerformanceTest( + deps, + multiPhaseScenario, + "test-id-4", + immediateSleep, + ); + + expect(result.phases).toHaveLength(2); + expect(result.phases[0]).toEqual(phase1Result); + expect(result.phases[1]).toEqual(phase2Result); + }); + + it("collects delivery metrics across multiple poll iterations", async () => { + let resolvePhase!: (value: PhaseResult) => void; + mockGeneratePhaseLoad.mockImplementation( + () => + new Promise((r) => { + resolvePhase = r; + }), + ); + mockQueryMetricsSnapshot.mockResolvedValue(mockSnapshot); + mockQueryDeliveryMetricsSnapshot.mockResolvedValue(mockDeliverySnapshot); + + let sleepCount = 0; + const controlledSleep = jest.fn(async () => { + sleepCount += 1; + if (sleepCount >= 3) { + resolvePhase(mockPhaseResult); + } + }); + + const result = await runPerformanceTest( + deps, + scenario, + "test-id-poll", + controlledSleep, + ); + + expect(result.deliveryMetrics.length).toBeGreaterThanOrEqual(1); + }); + + it("throws when scenario.eventMix is empty", async () => { + 
const emptyMixScenario: Scenario = { ...scenario, eventMix: [] }; + + await expect( + runPerformanceTest( + deps, + emptyMixScenario, + "empty-mix-test", + immediateSleep, + ), + ).rejects.toThrow("scenario.eventMix must contain at least one entry"); + }); + + it("throws when a phase has durationSecs of zero", async () => { + const badScenario: Scenario = { + ...scenario, + phases: [{ durationSecs: 0, targetEps: 1000 }], + }; + + await expect( + runPerformanceTest( + deps, + badScenario, + "zero-duration-test", + immediateSleep, + ), + ).rejects.toThrow("scenario.phases[0].durationSecs must be greater than 0"); + }); + + it("throws when a phase has targetEps of zero", async () => { + const badScenario: Scenario = { + ...scenario, + phases: [{ durationSecs: 1, targetEps: 0 }], + }; + + await expect( + runPerformanceTest(deps, badScenario, "zero-eps-test", immediateSleep), + ).rejects.toThrow("scenario.phases[0].targetEps must be greater than 0"); + }); + + it("throws when a later phase has an invalid value", async () => { + const badScenario: Scenario = { + ...scenario, + phases: [ + { durationSecs: 1, targetEps: 1000 }, + { durationSecs: 1, targetEps: 0 }, + ], + }; + + await expect( + runPerformanceTest(deps, badScenario, "later-phase-test", immediateSleep), + ).rejects.toThrow("scenario.phases[1].targetEps must be greater than 0"); + }); + + it("calls generatePhaseLoad with the correct phase and deps", async () => { + mockQueryMetricsSnapshot.mockResolvedValue(null); + + await runPerformanceTest(deps, scenario, "test-id-5", immediateSleep); + + expect(mockGeneratePhaseLoad).toHaveBeenCalledWith( + deps.sqsClient, + deps.queueUrl, + scenario.phases[0], + scenario.eventMix, + ); + }); + + it("skips delivery metrics when deliveryLogGroupPrefix is undefined", async () => { + const depsWithoutPrefix: RunnerDeps = { + ...deps, + deliveryLogGroupPrefix: undefined, + }; + mockQueryMetricsSnapshot.mockResolvedValue(mockSnapshot); + + const result = await 
runPerformanceTest( + depsWithoutPrefix, + scenario, + "test-id-6", + immediateSleep, + ); + + expect(mockQueryDeliveryMetricsSnapshot).not.toHaveBeenCalled(); + expect(result.deliveryMetrics).toHaveLength(0); + }); + + it("builds delivery log group names from prefix and event mix client IDs", async () => { + mockQueryMetricsSnapshot.mockResolvedValue(null); + mockQueryDeliveryMetricsSnapshot.mockResolvedValue(null); + + const multiClientScenario: Scenario = { + ...scenario, + eventMix: [ + { + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "DELIVERED", + }, + { + weight: 1, + factory: "channelStatus", + clientId: "perf-client-2", + channelStatus: "DELIVERED", + }, + ], + }; + + await runPerformanceTest( + deps, + multiClientScenario, + "test-id-7", + immediateSleep, + ); + + expect(mockQueryDeliveryMetricsSnapshot).toHaveBeenCalledWith( + deps.cloudWatchClient, + expect.arrayContaining([ + "/aws/lambda/nhs-dev-callbacks-https-client-perf-client-1", + "/aws/lambda/nhs-dev-callbacks-https-client-perf-client-2", + ]), + expect.any(Number), + expect.any(Number), + ); + }); +}); + +describe("defaultSleep", () => { + beforeEach(() => jest.useFakeTimers()); + afterEach(() => jest.useRealTimers()); + + it("resolves after the specified delay", async () => { + const promise = defaultSleep(500); + await jest.advanceTimersByTimeAsync(500); + await expect(promise).resolves.toBeUndefined(); + }); +}); diff --git a/lambdas/perf-runner-lambda/src/__tests__/sqs.test.ts b/lambdas/perf-runner-lambda/src/__tests__/sqs.test.ts new file mode 100644 index 00000000..63ab41df --- /dev/null +++ b/lambdas/perf-runner-lambda/src/__tests__/sqs.test.ts @@ -0,0 +1,141 @@ +import type { SQSClient } from "@aws-sdk/client-sqs"; +import type { EventMixEntry, Phase } from "types"; +import { generatePhaseLoad, selectWeighted, sendSqsBatch } from "sqs"; + +jest.mock("event-factories", () => ({ + createEvent: jest.fn(() => ({ + specversion: "1.0", + id: 
"mock-event-id", + type: "mock.type", + data: {}, + })), +})); + +const mockSqsClient = { + send: jest.fn(), +} as unknown as jest.Mocked; + +beforeEach(() => { + mockSqsClient.send.mockResolvedValue({} as never); +}); + +describe("selectWeighted", () => { + it("returns the only entry when there is one", () => { + const entries = [{ weight: 1, value: "a" }]; + const result = selectWeighted(entries); + expect(result).toBe(entries[0]); + }); + + it("distributes selections according to weight over many draws", () => { + const entries = [ + { weight: 9, label: "heavy" }, + { weight: 1, label: "light" }, + ]; + + const counts = { heavy: 0, light: 0 }; + for (let i = 0; i < 1000; i += 1) { + const selected = selectWeighted(entries); + counts[selected.label as keyof typeof counts] += 1; + } + + expect(counts.heavy).toBeGreaterThan(counts.light); + }); + + it("returns the last entry via fallback when no earlier entry matches", () => { + // With Math.random = 0.5, remaining = 0.5 * 10 = 5. + // First entry has weight 1; 5 - 1 = 4 > 0, so loop skips it. + // Fallback returns the last entry. 
+ jest.spyOn(Math, "random").mockReturnValue(0.5); + const entries = [ + { weight: 1, label: "light" }, + { weight: 9, label: "heavy" }, + ]; + + const result = selectWeighted(entries); + expect(result.label).toBe("heavy"); + jest.restoreAllMocks(); + }); +}); + +describe("sendSqsBatch", () => { + it("sends a SendMessageBatchCommand with serialised event bodies", async () => { + const events = [ + { specversion: "1.0", id: "a", type: "t", data: {} }, + { specversion: "1.0", id: "b", type: "t", data: {} }, + ] as never[]; + + await sendSqsBatch( + mockSqsClient, + "https://sqs.example.invalid/queue", + events, + ); + + expect(mockSqsClient.send).toHaveBeenCalledTimes(1); + const command = mockSqsClient.send.mock.calls[0][0] as { + input: { + QueueUrl: string; + Entries: { Id: string; MessageBody: string }[]; + }; + }; + expect(command.input.QueueUrl).toBe("https://sqs.example.invalid/queue"); + expect(command.input.Entries).toHaveLength(2); + expect(command.input.Entries[0].Id).toBe("0"); + expect(JSON.parse(command.input.Entries[0].MessageBody)).toMatchObject({ + id: "a", + }); + }); +}); + +describe("generatePhaseLoad", () => { + it("returns a PhaseResult with sent count and timing", async () => { + const phase: Phase = { durationSecs: 1, targetEps: 10 }; + const eventMix: EventMixEntry[] = [ + { + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "DELIVERED", + }, + ]; + + const result = await generatePhaseLoad( + mockSqsClient, + "https://sqs.example.invalid/queue", + phase, + eventMix, + ); + + expect(result.targetEps).toBe(10); + expect(result.sent).toBeGreaterThan(0); + expect(result.durationMs).toBeGreaterThanOrEqual(0); + expect(result.achievedEps).toBeGreaterThan(0); + expect(mockSqsClient.send).toHaveBeenCalled(); + }); + + it("throttles between seconds when the wave completes early", async () => { + jest.useFakeTimers(); + + const phase: Phase = { durationSecs: 2, targetEps: 10 }; + const eventMix: EventMixEntry[] = [ + 
{ + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "DELIVERED", + }, + ]; + + const resultPromise = generatePhaseLoad( + mockSqsClient, + "https://sqs.example.invalid/queue", + phase, + eventMix, + ); + + await jest.runAllTimersAsync(); + const result = await resultPromise; + + expect(result.sent).toBeGreaterThan(0); + jest.useRealTimers(); + }); +}); diff --git a/lambdas/perf-runner-lambda/src/cloudwatch.ts b/lambdas/perf-runner-lambda/src/cloudwatch.ts new file mode 100644 index 00000000..206bec33 --- /dev/null +++ b/lambdas/perf-runner-lambda/src/cloudwatch.ts @@ -0,0 +1,110 @@ +import { + type CloudWatchLogsClient, + GetQueryResultsCommand, + StartQueryCommand, +} from "@aws-sdk/client-cloudwatch-logs"; +import type { DeliveryMetricsSnapshot, MetricsSnapshot } from "types"; + +const INSIGHTS_POLL_INTERVAL_MS = 2000; +const INSIGHTS_TIMEOUT_MS = 30_000; + +type ResultField = { field?: string; value?: string }; + +async function pollQueryResults( + client: CloudWatchLogsClient, + queryId: string, + mapRow: (row: ResultField[]) => T, +): Promise { + const zeroResult = mapRow([]); + const deadline = Date.now() + INSIGHTS_TIMEOUT_MS; + + while (Date.now() < deadline) { + await new Promise((resolve) => { + setTimeout(resolve, INSIGHTS_POLL_INTERVAL_MS); + }); + + const response = await client.send(new GetQueryResultsCommand({ queryId })); + + if (response.status === "Failed" || response.status === "Cancelled") { + return null; + } + + if (response.status === "Complete") { + const row = response.results?.[0]; + if (!row) return zeroResult; + return mapRow(row); + } + } + + return null; +} + +export async function queryMetricsSnapshot( + client: CloudWatchLogsClient, + logGroupName: string, + startTimeSec: number, + endTimeSec: number, +): Promise { + const { queryId } = await client.send( + new StartQueryCommand({ + logGroupName, + startTime: startTimeSec, + endTime: endTimeSec, + queryString: [ + 'filter msg = "Callback 
lifecycle: batch-processing-completed"', + "| stats count(*) as eventCount, pct(processingTimeMs, 50) as p50, pct(processingTimeMs, 95) as p95, pct(processingTimeMs, 99) as p99", + ].join("\n"), + }), + ); + + if (!queryId) return null; + + return pollQueryResults(client, queryId, (row) => { + const getField = (name: string): number => + Number(row.find((f) => f.field === name)?.value ?? 0); + + return { + snapshotAt: Date.now(), + p50Ms: getField("p50"), + p95Ms: getField("p95"), + p99Ms: getField("p99"), + count: getField("eventCount"), + }; + }); +} + +export async function queryDeliveryMetricsSnapshot( + client: CloudWatchLogsClient, + logGroupNames: string[], + startTimeSec: number, + endTimeSec: number, +): Promise { + if (logGroupNames.length === 0) return null; + + const { queryId } = await client.send( + new StartQueryCommand({ + logGroupNames, + startTime: startTimeSec, + endTime: endTimeSec, + queryString: [ + "filter ispresent(DeliveryDurationMs)", + "| stats count(DeliveryDurationMs) as deliveryCount, pct(DeliveryDurationMs, 50) as p50, pct(DeliveryDurationMs, 95) as p95, pct(DeliveryDurationMs, 99) as p99", + ].join("\n"), + }), + ); + + if (!queryId) return null; + + return pollQueryResults(client, queryId, (row) => { + const getField = (name: string): number => + Number(row.find((f) => f.field === name)?.value ?? 
0); + + return { + snapshotAt: Date.now(), + deliveryCount: getField("deliveryCount"), + p50Ms: getField("p50"), + p95Ms: getField("p95"), + p99Ms: getField("p99"), + }; + }); +} diff --git a/tests/performance/helpers/event-factories.ts b/lambdas/perf-runner-lambda/src/event-factories.ts similarity index 70% rename from tests/performance/helpers/event-factories.ts rename to lambdas/perf-runner-lambda/src/event-factories.ts index c31571e4..6f39add9 100644 --- a/tests/performance/helpers/event-factories.ts +++ b/lambdas/perf-runner-lambda/src/event-factories.ts @@ -1,22 +1,24 @@ import type { + ChannelStatus, ChannelStatusData, + MessageStatus, MessageStatusData, StatusPublishEvent, } from "@nhs-notify-client-callbacks/models"; import { EventTypes } from "@nhs-notify-client-callbacks/models"; +import type { EventMixEntry } from "types"; -export function createMessageStatusPublishEvent( - overrides?: Partial, +export function createMessageStatusEvent( + clientId: string, + messageStatus: MessageStatus, ): StatusPublishEvent { - const messageId = overrides?.messageId ?? crypto.randomUUID(); - const messageReference = - overrides?.messageReference ?? 
`ref-${crypto.randomUUID()}`; + const messageId = crypto.randomUUID(); const data: MessageStatusData = { - clientId: "mock-client-1", + clientId, messageId, - messageReference, - messageStatus: "DELIVERED", + messageReference: `ref-${crypto.randomUUID()}`, + messageStatus, channels: [{ type: "NHSAPP", channelStatus: "DELIVERED" }], timestamp: new Date().toISOString(), routingPlan: { @@ -25,7 +27,6 @@ export function createMessageStatusPublishEvent( version: "v1.0.0", createdDate: new Date().toISOString(), }, - ...overrides, }; return { @@ -43,26 +44,23 @@ export function createMessageStatusPublishEvent( }; } -export function createChannelStatusPublishEvent( - overrides?: Partial, +export function createChannelStatusEvent( + clientId: string, + channelStatus: ChannelStatus, ): StatusPublishEvent { - const messageId = overrides?.messageId ?? crypto.randomUUID(); - const messageReference = - overrides?.messageReference ?? `ref-${crypto.randomUUID()}`; + const messageId = crypto.randomUUID(); const data: ChannelStatusData = { - clientId: "mock-client-1", + clientId, messageId, - messageReference, + messageReference: `ref-${crypto.randomUUID()}`, channel: "NHSAPP", - channelStatus: "DELIVERED", - channelStatusDescription: "perf-test", + channelStatus, supplierStatus: "delivered", cascadeType: "primary", cascadeOrder: 0, timestamp: new Date().toISOString(), retryCount: 0, - ...overrides, }; return { @@ -79,3 +77,11 @@ export function createChannelStatusPublishEvent( data, }; } + +export function createEvent(entry: EventMixEntry): StatusPublishEvent { + if (entry.factory === "messageStatus") { + return createMessageStatusEvent(entry.clientId, entry.messageStatus); + } + + return createChannelStatusEvent(entry.clientId, entry.channelStatus); +} diff --git a/lambdas/perf-runner-lambda/src/index.ts b/lambdas/perf-runner-lambda/src/index.ts new file mode 100644 index 00000000..a0881866 --- /dev/null +++ b/lambdas/perf-runner-lambda/src/index.ts @@ -0,0 +1,55 @@ +import { 
CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; +import { SQSClient } from "@aws-sdk/client-sqs"; +import { Logger } from "@nhs-notify-client-callbacks/logger"; +import { runPerformanceTest } from "runner"; +import { DEFAULT_SCENARIO } from "scenario"; +import type { PerfRunnerPayload, PerformanceResult } from "types"; + +const logger = new Logger(); + +export async function handler( + event: PerfRunnerPayload, +): Promise { + const { scenario = DEFAULT_SCENARIO, testId } = event; + + const region = process.env.AWS_REGION ?? "eu-west-2"; + const queueUrl = process.env.INBOUND_QUEUE_URL; + const logGroupName = process.env.TRANSFORM_FILTER_LOG_GROUP; + const deliveryLogGroupPrefix = process.env.DELIVERY_LOG_GROUP_PREFIX; + + if (!queueUrl) { + throw new Error("Missing required environment variable: INBOUND_QUEUE_URL"); + } + + if (!logGroupName) { + throw new Error( + "Missing required environment variable: TRANSFORM_FILTER_LOG_GROUP", + ); + } + + const sqsClient = new SQSClient({ region }); + const cloudWatchClient = new CloudWatchLogsClient({ region }); + + logger.info("Performance test started", { testId }); + + try { + const result = await runPerformanceTest( + { + sqsClient, + cloudWatchClient, + queueUrl, + logGroupName, + deliveryLogGroupPrefix, + }, + scenario, + testId, + ); + + logger.info("Performance test completed", { testId }); + + return result; + } finally { + sqsClient.destroy(); + cloudWatchClient.destroy(); + } +} diff --git a/lambdas/perf-runner-lambda/src/runner.ts b/lambdas/perf-runner-lambda/src/runner.ts new file mode 100644 index 00000000..a265e90e --- /dev/null +++ b/lambdas/perf-runner-lambda/src/runner.ts @@ -0,0 +1,140 @@ +import type { + DeliveryMetricsSnapshot, + MetricsSnapshot, + PerformanceResult, + PhaseResult, + RunnerDeps, + Scenario, +} from "types"; +import { generatePhaseLoad } from "sqs"; +import { queryDeliveryMetricsSnapshot, queryMetricsSnapshot } from "cloudwatch"; + +const CLOUDWATCH_SETTLING_MS = 60_000; 
+ +export const defaultSleep = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms); + }); + +function buildDeliveryLogGroupNames( + prefix: string | undefined, + scenario: Scenario, +): string[] { + if (!prefix) return []; + const clientIds = new Set(scenario.eventMix.map((e) => e.clientId)); + return [...clientIds].map((id) => `${prefix}${id}`); +} + +export async function runPerformanceTest( + deps: RunnerDeps, + scenario: Scenario, + testId: string, + sleepFn: (ms: number) => Promise = defaultSleep, +): Promise { + if (scenario.eventMix.length === 0) { + throw new Error("scenario.eventMix must contain at least one entry"); + } + + for (const [index, phase] of scenario.phases.entries()) { + if (phase.durationSecs <= 0) { + throw new Error( + `scenario.phases[${index}].durationSecs must be greater than 0`, + ); + } + if (phase.targetEps <= 0) { + throw new Error( + `scenario.phases[${index}].targetEps must be greater than 0`, + ); + } + } + + const testStartMs = Date.now(); + const startedAt = new Date(testStartMs).toISOString(); + const phaseResults: PhaseResult[] = []; + const snapshots: MetricsSnapshot[] = []; + const deliverySnapshots: DeliveryMetricsSnapshot[] = []; + let stopPolling = false; + + const deliveryLogGroupNames = buildDeliveryLogGroupNames( + deps.deliveryLogGroupPrefix, + scenario, + ); + + const pollLoop = async (): Promise => { + await sleepFn(scenario.metricsIntervalSecs * 1000); + while (!stopPolling) { + const startSec = Math.floor(testStartMs / 1000); + const endSec = Math.floor(Date.now() / 1000); + + const snap = await queryMetricsSnapshot( + deps.cloudWatchClient, + deps.logGroupName, + startSec, + endSec, + ); + if (snap !== null) snapshots.push(snap); + + if (deliveryLogGroupNames.length > 0) { + const deliverySnap = await queryDeliveryMetricsSnapshot( + deps.cloudWatchClient, + deliveryLogGroupNames, + startSec, + endSec, + ); + if (deliverySnap !== null) deliverySnapshots.push(deliverySnap); + } + + if 
(!stopPolling) { + await sleepFn(scenario.metricsIntervalSecs * 1000); + } + } + }; + + const pollPromise = pollLoop(); + + for (const phase of scenario.phases) { + const result = await generatePhaseLoad( + deps.sqsClient, + deps.queueUrl, + phase, + scenario.eventMix, + ); + phaseResults.push(result); + } + + stopPolling = true; + await pollPromise; + + await sleepFn(CLOUDWATCH_SETTLING_MS); + + const finalStartSec = Math.floor(testStartMs / 1000); + const finalEndSec = Math.floor(Date.now() / 1000); + + const finalSnap = await queryMetricsSnapshot( + deps.cloudWatchClient, + deps.logGroupName, + finalStartSec, + finalEndSec, + ); + if (finalSnap !== null) snapshots.push(finalSnap); + + if (deliveryLogGroupNames.length > 0) { + const finalDeliverySnap = await queryDeliveryMetricsSnapshot( + deps.cloudWatchClient, + deliveryLogGroupNames, + finalStartSec, + finalEndSec, + ); + if (finalDeliverySnap !== null) deliverySnapshots.push(finalDeliverySnap); + } + + return { + testId, + scenario, + startedAt, + completedAt: new Date().toISOString(), + phases: phaseResults, + metrics: snapshots, + deliveryMetrics: deliverySnapshots, + }; +} diff --git a/lambdas/perf-runner-lambda/src/scenario.ts b/lambdas/perf-runner-lambda/src/scenario.ts new file mode 100644 index 00000000..30c7cf72 --- /dev/null +++ b/lambdas/perf-runner-lambda/src/scenario.ts @@ -0,0 +1,82 @@ +import type { Scenario } from "types"; + +export const DEFAULT_SCENARIO: Scenario = { + phases: [ + { durationSecs: 15, targetEps: 1000 }, + { durationSecs: 15, targetEps: 2000 }, + { durationSecs: 30, targetEps: 3000 }, + ], + eventMix: [ + // perf-client-1: all message statuses → all subscription paths exercised + { + weight: 4, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "DELIVERED", + }, + { + weight: 2, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "FAILED", + }, + { + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: 
"SENDING", + }, + { + weight: 1, + factory: "messageStatus", + clientId: "perf-client-1", + messageStatus: "PENDING_ENRICHMENT", + }, + // perf-client-2: channel status events + { + weight: 3, + factory: "channelStatus", + clientId: "perf-client-2", + channelStatus: "DELIVERED", + }, + { + weight: 1, + factory: "channelStatus", + clientId: "perf-client-2", + channelStatus: "FAILED", + }, + { + weight: 1, + factory: "channelStatus", + clientId: "perf-client-2", + channelStatus: "RETRY", + }, + // perf-client-3: DELIVERED matches (fan-out to 2 targets); SENDING is filtered + { + weight: 2, + factory: "messageStatus", + clientId: "perf-client-3", + messageStatus: "DELIVERED", + }, + { + weight: 1, + factory: "messageStatus", + clientId: "perf-client-3", + messageStatus: "SENDING", + }, + // perf-client-4: mixed message + channel status + { + weight: 2, + factory: "messageStatus", + clientId: "perf-client-4", + messageStatus: "DELIVERED", + }, + { + weight: 1, + factory: "channelStatus", + clientId: "perf-client-4", + channelStatus: "DELIVERED", + }, + ], + metricsIntervalSecs: 15, +}; diff --git a/lambdas/perf-runner-lambda/src/sqs.ts b/lambdas/perf-runner-lambda/src/sqs.ts new file mode 100644 index 00000000..154ce2e3 --- /dev/null +++ b/lambdas/perf-runner-lambda/src/sqs.ts @@ -0,0 +1,78 @@ +import { type SQSClient, SendMessageBatchCommand } from "@aws-sdk/client-sqs"; +import type { StatusPublishEvent } from "@nhs-notify-client-callbacks/models"; +import type { EventMixEntry, Phase, PhaseResult } from "types"; +import { createEvent } from "event-factories"; + +const SQS_MAX_BATCH_SIZE = 10; + +export function selectWeighted(entries: T[]): T { + const totalWeight = entries.reduce((sum, entry) => sum + entry.weight, 0); + // eslint-disable-next-line sonarjs/pseudo-random -- weighted selection for load test event distribution + let remaining = Math.random() * totalWeight; + + for (const entry of entries.slice(0, -1)) { + remaining -= entry.weight; + if (remaining <= 
0) return entry; + } + + // Safe: selectWeighted is only called with non-empty arrays + return entries.at(-1)!; +} + +export async function sendSqsBatch( + client: SQSClient, + queueUrl: string, + events: StatusPublishEvent[], +): Promise { + await client.send( + new SendMessageBatchCommand({ + QueueUrl: queueUrl, + Entries: events.map((event, index) => ({ + Id: String(index), + MessageBody: JSON.stringify(event), + })), + }), + ); +} + +export async function generatePhaseLoad( + client: SQSClient, + queueUrl: string, + phase: Phase, + eventMix: EventMixEntry[], +): Promise { + const batchesPerSecond = Math.ceil(phase.targetEps / SQS_MAX_BATCH_SIZE); + const start = Date.now(); + let sent = 0; + + for (let second = 0; second < phase.durationSecs; second++) { + const waveStart = Date.now(); + + const batchResults = await Promise.all( + Array.from({ length: batchesPerSecond }, () => { + const batch = Array.from({ length: SQS_MAX_BATCH_SIZE }, () => + createEvent(selectWeighted(eventMix)), + ); + return sendSqsBatch(client, queueUrl, batch).then(() => batch.length); + }), + ); + + sent += batchResults.reduce((sum, count) => sum + count, 0); + + const remaining = 1000 - (Date.now() - waveStart); + if (remaining > 0 && second < phase.durationSecs - 1) { + await new Promise((resolve) => { + setTimeout(resolve, remaining); + }); + } + } + + const durationMs = Date.now() - start; + + return { + targetEps: phase.targetEps, + achievedEps: Math.round(sent / (durationMs / 1000)), + sent, + durationMs, + }; +} diff --git a/lambdas/perf-runner-lambda/src/types.ts b/lambdas/perf-runner-lambda/src/types.ts new file mode 100644 index 00000000..5366602d --- /dev/null +++ b/lambdas/perf-runner-lambda/src/types.ts @@ -0,0 +1,79 @@ +import type { SQSClient } from "@aws-sdk/client-sqs"; +import type { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; +import type { + ChannelStatus, + MessageStatus, +} from "@nhs-notify-client-callbacks/models"; + +export type 
MessageStatusMixEntry = { + weight: number; + factory: "messageStatus"; + clientId: string; + messageStatus: MessageStatus; +}; + +export type ChannelStatusMixEntry = { + weight: number; + factory: "channelStatus"; + clientId: string; + channelStatus: ChannelStatus; +}; + +export type EventMixEntry = MessageStatusMixEntry | ChannelStatusMixEntry; + +export type Phase = { + durationSecs: number; + targetEps: number; +}; + +export type Scenario = { + phases: Phase[]; + eventMix: EventMixEntry[]; + metricsIntervalSecs: number; +}; + +export type PhaseResult = { + targetEps: number; + achievedEps: number; + sent: number; + durationMs: number; +}; + +export type MetricsSnapshot = { + snapshotAt: number; + p50Ms: number; + p95Ms: number; + p99Ms: number; + count: number; +}; + +export type DeliveryMetricsSnapshot = { + snapshotAt: number; + deliveryCount: number; + p50Ms: number; + p95Ms: number; + p99Ms: number; +}; + +export type PerformanceResult = { + testId: string; + scenario: Scenario; + startedAt: string; + completedAt: string; + phases: PhaseResult[]; + metrics: MetricsSnapshot[]; + deliveryMetrics: DeliveryMetricsSnapshot[]; +}; + +export type PerfRunnerPayload = { + testId: string; + scenario?: Scenario; +}; + +export type RunnerDeps = { + sqsClient: SQSClient; + cloudWatchClient: CloudWatchLogsClient; + queueUrl: string; + logGroupName: string; + deliveryLogGroupPrefix?: string; +}; diff --git a/lambdas/perf-runner-lambda/tsconfig.json b/lambdas/perf-runner-lambda/tsconfig.json new file mode 100644 index 00000000..a50e6fc0 --- /dev/null +++ b/lambdas/perf-runner-lambda/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "isolatedModules": true, + "paths": { + "*": [ + "./src/*" + ] + } + }, + "extends": "../../tsconfig.base.json", + "include": [ + "src/**/*" + ] +} diff --git a/package.json b/package.json index fa6f2223..aeddf03e 100644 --- a/package.json +++ b/package.json @@ -32,15 +32,6 @@ "typescript-eslint": "catalog:lint" }, "name": 
"nhs-notify-client-callbacks", - "pnpm": { - "overrides": { - "collect-v8-coverage": "^1.0.3", - "pretty-format>react-is": "19.0.0", - "flatted": "^3.4.0", - "fast-xml-parser": "^5.5.6", - "ts-jest>handlebars": "^4.7.9" - } - }, "scripts": { "generate-dependencies": "pnpm -r run --if-present generate-dependencies || true", "lint": "pnpm -r run lint", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 753d95da..c497eafb 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -6,6 +6,9 @@ settings: catalogs: app: + '@redis/client': + specifier: ^1.5.14 + version: 1.6.1 async-wait-until: specifier: ^2.0.31 version: 2.0.31 @@ -15,9 +18,15 @@ catalogs: cloudevents: specifier: ^10.0.0 version: 10.0.0 + node-forge: + specifier: ^1.3.1 + version: 1.4.0 p-map: specifier: ^4.0.0 version: 4.0.0 + picocolors: + specifier: ^1.1.1 + version: 1.1.1 pino: specifier: ^10.3.1 version: 10.3.1 @@ -31,6 +40,9 @@ catalogs: specifier: ^4.3.6 version: 4.3.6 aws: + '@aws-crypto/sha256-js': + specifier: ^5.2.0 + version: 5.2.0 '@aws-sdk/client-cloudwatch': specifier: ^3.1025.0 version: 3.1029.0 @@ -40,6 +52,9 @@ catalogs: '@aws-sdk/client-s3': specifier: ^3.1024.0 version: 3.1029.0 + '@aws-sdk/client-secrets-manager': + specifier: ^3.1023.0 + version: 3.1029.0 '@aws-sdk/client-sqs': specifier: ^3.1023.0 version: 3.1026.0 @@ -52,6 +67,9 @@ catalogs: '@aws-sdk/credential-providers': specifier: ^3.1023.0 version: 3.1026.0 + '@smithy/signature-v4': + specifier: ^5.0.0 + version: 5.3.13 lint: '@eslint/js': specifier: ^9.39.4 @@ -134,8 +152,11 @@ catalogs: specifier: ^8.10.161 version: 8.10.161 '@types/node': - specifier: ^24.12.0 - version: 24.12.0 + specifier: ^25.5.0 + version: 25.6.0 + '@types/node-forge': + specifier: ^1.3.11 + version: 1.3.14 '@types/yargs': specifier: ^17.0.24 version: 17.0.35 @@ -204,7 +225,7 @@ importers: version: 4.16.2(@typescript-eslint/utils@8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint-import-resolver-node@0.3.9)(eslint@9.39.4(jiti@2.6.1)) 
eslint-plugin-jest: specifier: catalog:lint - version: 29.15.2(@typescript-eslint/eslint-plugin@8.58.0(@typescript-eslint/parser@8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.15.2(@typescript-eslint/eslint-plugin@8.58.0(@typescript-eslint/parser@8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) eslint-plugin-json: specifier: catalog:lint version: 4.0.1 @@ -231,10 +252,10 @@ importers: version: 63.0.0(eslint@9.39.4(jiti@2.6.1)) jest: specifier: catalog:test - version: 30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) jest-html-reporter: specifier: catalog:test - version: 4.4.0(jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3))) + version: 4.4.0(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3))) knip: specifier: catalog:tools version: 6.4.1(@emnapi/core@1.9.1)(@emnapi/runtime@1.9.1) @@ -243,10 +264,10 @@ importers: version: 5.0.1 ts-jest: specifier: catalog:test - version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) ts-node: specifier: 
catalog:tools - version: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + version: 10.9.2(@types/node@25.6.0)(typescript@5.9.3) tsx: specifier: catalog:tools version: 4.21.0 @@ -264,9 +285,9 @@ importers: '@aws-sdk/client-s3': specifier: catalog:aws version: 3.1029.0 - '@aws-sdk/client-ssm': - specifier: catalog:aws - version: 3.1029.0 + '@nhs-notify-client-callbacks/config-subscription-cache': + specifier: workspace:* + version: link:../../src/config-subscription-cache '@nhs-notify-client-callbacks/logger': specifier: workspace:* version: link:../../src/logger @@ -300,13 +321,89 @@ importers: version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) + typescript: + specifier: catalog:tools + version: 5.9.3 + + lambdas/https-client-lambda: + dependencies: + '@aws-crypto/sha256-js': + specifier: catalog:aws + version: 5.2.0 + '@aws-sdk/client-s3': + specifier: catalog:aws + version: 3.1029.0 + '@aws-sdk/client-secrets-manager': + specifier: catalog:aws + version: 3.1029.0 + '@aws-sdk/client-sqs': + specifier: catalog:aws + version: 3.1026.0 + '@aws-sdk/client-ssm': + specifier: catalog:aws + version: 3.1029.0 + '@aws-sdk/credential-providers': + specifier: catalog:aws + version: 3.1026.0 + '@nhs-notify-client-callbacks/config-subscription-cache': + specifier: workspace:* + version: link:../../src/config-subscription-cache + '@nhs-notify-client-callbacks/logger': + specifier: workspace:* + version: link:../../src/logger + '@nhs-notify-client-callbacks/models': + specifier: workspace:* + version: link:../../src/models + '@redis/client': + specifier: catalog:app + version: 1.6.1 + '@smithy/signature-v4': + specifier: catalog:aws + version: 5.3.13 + 
aws-embedded-metrics: + specifier: catalog:app + version: 4.2.1 + esbuild: + specifier: catalog:tools + version: 0.28.0 + node-forge: + specifier: catalog:app + version: 1.4.0 + p-map: + specifier: catalog:app + version: 4.0.0 + devDependencies: + '@tsconfig/node22': + specifier: catalog:tools + version: 22.0.5 + '@types/aws-lambda': + specifier: catalog:tools + version: 8.10.161 + '@types/jest': + specifier: catalog:test + version: 30.0.0 + '@types/node': + specifier: catalog:tools + version: 25.6.0 + '@types/node-forge': + specifier: catalog:tools + version: 1.3.14 + eslint: + specifier: catalog:lint + version: 9.39.4(jiti@2.6.1) + fengari: + specifier: ^0.1.5 + version: 0.1.5 + jest: + specifier: catalog:test + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) typescript: specifier: catalog:tools version: 5.9.3 @@ -334,56 +431,71 @@ importers: version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) jest-html-reporter: specifier: catalog:test - version: 4.4.0(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3))) + version: 4.4.0(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3))) ts-jest: specifier: catalog:test - version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)))(typescript@5.9.3) + version: 
29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) typescript: specifier: catalog:tools version: 5.9.3 - src/logger: + lambdas/perf-runner-lambda: dependencies: - pino: - specifier: catalog:app - version: 10.3.1 + '@aws-sdk/client-cloudwatch-logs': + specifier: catalog:aws + version: 3.1026.0 + '@aws-sdk/client-sqs': + specifier: catalog:aws + version: 3.1026.0 + '@nhs-notify-client-callbacks/logger': + specifier: workspace:* + version: link:../../src/logger + '@nhs-notify-client-callbacks/models': + specifier: workspace:* + version: link:../../src/models + esbuild: + specifier: catalog:tools + version: 0.28.0 devDependencies: '@tsconfig/node22': specifier: catalog:tools version: 22.0.5 + '@types/aws-lambda': + specifier: catalog:tools + version: 8.10.161 '@types/jest': specifier: catalog:test version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) ts-jest: specifier: catalog:test - version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) typescript: specifier: catalog:tools version: 5.9.3 - src/models: + 
src/config-cache: dependencies: - zod: - specifier: catalog:app - version: 4.3.6 + '@nhs-notify-client-callbacks/models': + specifier: workspace:* + version: link:../models devDependencies: '@tsconfig/node22': specifier: catalog:tools @@ -393,43 +505,59 @@ importers: version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) ts-jest: specifier: catalog:test - version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) typescript: specifier: catalog:tools version: 5.9.3 - tests/integration: + src/config-subscription-cache: dependencies: - '@aws-sdk/client-cloudwatch': + '@aws-sdk/client-s3': specifier: catalog:aws version: 3.1029.0 - '@aws-sdk/client-cloudwatch-logs': - specifier: catalog:aws - version: 3.1026.0 - '@aws-sdk/client-sqs': - specifier: catalog:aws - version: 3.1026.0 '@nhs-notify-client-callbacks/logger': specifier: workspace:* - version: link:../../src/logger + version: link:../logger '@nhs-notify-client-callbacks/models': specifier: workspace:* - version: link:../../src/models - '@nhs-notify-client-callbacks/test-support': - specifier: workspace:* - version: link:../test-support - async-wait-until: + version: link:../models + devDependencies: + '@tsconfig/node22': + specifier: catalog:tools + 
version: 22.0.5 + '@types/jest': + specifier: catalog:test + version: 30.0.0 + '@types/node': + specifier: catalog:tools + version: 25.6.0 + eslint: + specifier: catalog:lint + version: 9.39.4(jiti@2.6.1) + jest: + specifier: catalog:test + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) + ts-jest: + specifier: catalog:test + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) + typescript: + specifier: catalog:tools + version: 5.9.3 + + src/logger: + dependencies: + pino: specifier: catalog:app - version: 2.0.31 + version: 10.3.1 devDependencies: '@tsconfig/node22': specifier: catalog:tools @@ -439,25 +567,62 @@ importers: version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) + ts-jest: + specifier: catalog:test + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) typescript: specifier: catalog:tools version: 5.9.3 - tests/performance: + src/models: dependencies: + zod: + specifier: catalog:app + version: 4.3.6 + devDependencies: + '@tsconfig/node22': + specifier: catalog:tools + version: 22.0.5 + '@types/jest': + specifier: catalog:test + version: 30.0.0 + '@types/node': + specifier: catalog:tools + version: 25.6.0 + eslint: + specifier: catalog:lint + version: 9.39.4(jiti@2.6.1) + jest: + specifier: 
catalog:test + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) + ts-jest: + specifier: catalog:test + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) + typescript: + specifier: catalog:tools + version: 5.9.3 + + tests/integration: + dependencies: + '@aws-sdk/client-cloudwatch': + specifier: catalog:aws + version: 3.1029.0 '@aws-sdk/client-cloudwatch-logs': specifier: catalog:aws version: 3.1026.0 '@aws-sdk/client-sqs': specifier: catalog:aws version: 3.1026.0 + '@nhs-notify-client-callbacks/logger': + specifier: workspace:* + version: link:../../src/logger '@nhs-notify-client-callbacks/models': specifier: workspace:* version: link:../../src/models @@ -476,13 +641,13 @@ importers: version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) typescript: specifier: catalog:tools version: 5.9.3 @@ -504,7 +669,7 @@ importers: version: 22.0.5 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 eslint: specifier: catalog:lint version: 9.39.4(jiti@2.6.1) @@ -529,6 +694,9 @@ importers: '@nhs-notify-client-callbacks/models': specifier: workspace:* version: link:../../src/models + picocolors: + specifier: catalog:app + version: 1.1.1 table: specifier: catalog:app version: 6.9.0 @@ -544,7 +712,7 @@ importers: version: 30.0.0 '@types/node': specifier: catalog:tools - version: 24.12.0 + version: 25.6.0 '@types/yargs': specifier: catalog:tools version: 17.0.35 @@ -553,10 +721,10 @@ importers: version: 
9.39.4(jiti@2.6.1) jest: specifier: catalog:test - version: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) + version: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) ts-jest: specifier: catalog:test - version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)))(typescript@5.9.3) + version: 29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3) tsx: specifier: catalog:tools version: 4.21.0 @@ -605,6 +773,10 @@ packages: resolution: {integrity: sha512-OuA8RZTxsAaHDcI25j2NGLMaYFI2WpJdDzK3uLmVBmaHwjQKQZOUDVVBcln8pNo3IgkY+HRSJhRR4/xlM//UyQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/client-secrets-manager@3.1029.0': + resolution: {integrity: sha512-OtNiJSEXA8+KkFA1aS24BOFkJoRlxwJ8tBLiUUYKVwLu8L3Smfz2oj4BJwRlv0FzWTqrmJkFC8kly/cAZqU2UQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/client-sqs@3.1026.0': resolution: {integrity: sha512-b7z2WI1tqObk4U7vUbmBfXIeFhxKbFr7xQ4rWi879iFl5aSPvpd1WAmLi6z1boVKTEwEqHALuE5MyGBHhOCy5A==} engines: {node: '>=20.0.0'} @@ -1697,6 +1869,10 @@ packages: resolution: {integrity: sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA==} engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} + '@redis/client@1.6.1': + resolution: {integrity: sha512-/KCsg3xSlR+nCK8/8ZYSknYxvXHwubJrU82F3Lm1Fp6789VQ0/3RJKfsmRXjqfaTA++23CvC3hqmqe/2GEt6Kw==} + engines: {node: '>=14'} + '@rtsao/scc@1.1.0': resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==} @@ -1985,11 +2161,11 @@ packages: '@types/json5@0.0.29': resolution: {integrity: 
sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==} - '@types/node@24.12.0': - resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==} + '@types/node-forge@1.3.14': + resolution: {integrity: sha512-mhVF2BnD4BO+jtOp7z1CdzaK4mbuK0LLQYAvdOLqHTavxFNq4zA1EmYkpnFjP8HOUzedfQkRnp0E2ulSAYSzAw==} - '@types/node@25.5.0': - resolution: {integrity: sha512-jp2P3tQMSxWugkCUKLRPVUpGaL5MVFwF8RDuSRztfwgN1wmqJeMSbKlnEtQqU8UrhTmzEmZdu2I6v2dpp7XIxw==} + '@types/node@25.6.0': + resolution: {integrity: sha512-+qIYRKdNYJwY3vRCZMdJbPLJAtGjQBudzZzdzwQYkEPQd+PJGixUL5QfvCLDaULoLv+RhT3LDkwEfKaAkgSmNQ==} '@types/stack-utils@2.0.3': resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==} @@ -2453,6 +2629,10 @@ packages: resolution: {integrity: sha512-uyzC+PpMMRawbouHO+3mlisr3QfEDObmo2pN4oTTF6dZncZgpIzdasZx0tRBFI1dMsqCLZZXMtz8cUuvYqHdbw==} engines: {node: '>=20 <=24'} + cluster-key-slot@1.1.2: + resolution: {integrity: sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA==} + engines: {node: '>=0.10.0'} + co@4.6.0: resolution: {integrity: sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==} engines: {iojs: '>= 1.0.0', node: '>= 0.12.0'} @@ -2956,6 +3136,9 @@ packages: picomatch: optional: true + fengari@0.1.5: + resolution: {integrity: sha512-0DS4Nn4rV8qyFlQCpKK8brT61EUtswynrpfFTcgLErcilBIBskSMQ86fO2WVuybr14ywyKdRjv91FiRZwnEuvQ==} + fflate@0.8.1: resolution: {integrity: sha512-/exOvEuc+/iaUm105QIiOt4LpBdMTWsXxqR0HDF35vx3fmaKzw7354gTilCh5rkzEt8WYyG//ku3h3nRmd7CHQ==} @@ -3024,6 +3207,10 @@ packages: resolution: {integrity: sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==} engines: {node: '>= 0.4'} + generic-pool@3.9.0: + resolution: {integrity: 
sha512-hymDOu5B53XvN4QT9dBmZxPX4CWhBPPLguTZ9MMFeFa/Kg0xWVfylOVNlJji/E7yTZWFd/q9GO5TxDLq156D7g==} + engines: {node: '>= 4'} + gensync@1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -3675,6 +3862,10 @@ packages: resolution: {integrity: sha512-pyFS63ptit/P5WqUkt+UUfe+4oevH+bFeIiPPdfb0pFeYEu/1ELnJu5l+5EcTKYL5M7zaAa7S8ddywgXypqKCw==} engines: {node: '>= 0.4'} + node-forge@1.4.0: + resolution: {integrity: sha512-LarFH0+6VfriEhqMMcLX2F7SwSXeWwnEAJEsYm5QKWchiVYVvJyV9v7UDvUv+w5HO23ZpQTXDv/GxdDdMyOuoQ==} + engines: {node: '>= 6.13.0'} + node-int64@0.4.0: resolution: {integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} @@ -3888,6 +4079,10 @@ packages: react-is@19.0.0: resolution: {integrity: sha512-H91OHcwjZsbq3ClIDHMzBShc1rotbfACdWENsmEf0IFvZ3FgGPtdHMcsv45bQ1hAbgdfiA8SnxTKfDS+x/8m2g==} + readline-sync@1.4.10: + resolution: {integrity: sha512-gNva8/6UAe8QYepIQH/jQ2qn91Qj0B9sYjMBBs3QOB8F2CXcKgLxQaJRP76sWVRQt+QU+8fAkCbCvjjMFu7Ycw==} + engines: {node: '>= 0.8.0'} + real-require@0.2.0: resolution: {integrity: sha512-57frrGM/OCTLqLOAh0mhVA9VBMHd+9U7Zb2THMGdBUoZVOtGbJzjxsYGDJ3A9AYYCP4hn6y1TVbaOfzWtm5GFg==} engines: {node: '>= 12.13.0'} @@ -4060,6 +4255,9 @@ packages: sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + sprintf-js@1.1.3: + resolution: {integrity: sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==} + stable-hash-x@0.2.0: resolution: {integrity: sha512-o3yWv49B/o4QZk5ZcsALc6t0+eCelPc44zZsLtCQnZPDwFpDYSWcDnrv2TtMmMbQ7uKo3J0HTURCqckw23czNQ==} engines: {node: '>=12.0.0'} @@ -4178,6 +4376,10 @@ packages: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} + tmp@0.2.5: + 
resolution: {integrity: sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==} + engines: {node: '>=14.14'} + tmpl@1.0.5: resolution: {integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==} @@ -4310,11 +4512,8 @@ packages: resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==} engines: {node: '>= 0.4'} - undici-types@7.16.0: - resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} - - undici-types@7.18.2: - resolution: {integrity: sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==} + undici-types@7.19.2: + resolution: {integrity: sha512-qYVnV5OEm2AW8cJMCpdV20CDyaN3g0AjDlOGf1OW4iaDEx8MwdtChUp4zu4H0VP3nDRF/8RKWH+IPp9uW0YGZg==} unrs-resolver@1.11.1: resolution: {integrity: sha512-bSjt9pjaEBnNiGgc9rUiHGKv5l4/TGzDmYw3RhnkJGtLhbnnA/5qJj7x3dNDCRx/PJxu774LlH8lCOlB4hEfKg==} @@ -4418,6 +4617,9 @@ packages: yallist@3.1.1: resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + yaml@2.8.3: resolution: {integrity: sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==} engines: {node: '>= 14.6'} @@ -4702,6 +4904,50 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/client-secrets-manager@3.1029.0': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.27 + '@aws-sdk/credential-provider-node': 3.972.30 + '@aws-sdk/middleware-host-header': 3.972.9 + '@aws-sdk/middleware-logger': 3.972.9 + '@aws-sdk/middleware-recursion-detection': 3.972.10 + '@aws-sdk/middleware-user-agent': 3.972.29 + 
'@aws-sdk/region-config-resolver': 3.972.11 + '@aws-sdk/types': 3.973.7 + '@aws-sdk/util-endpoints': 3.996.6 + '@aws-sdk/util-user-agent-browser': 3.972.9 + '@aws-sdk/util-user-agent-node': 3.973.15 + '@smithy/config-resolver': 4.4.14 + '@smithy/core': 3.23.14 + '@smithy/fetch-http-handler': 5.3.16 + '@smithy/hash-node': 4.2.13 + '@smithy/invalid-dependency': 4.2.13 + '@smithy/middleware-content-length': 4.2.13 + '@smithy/middleware-endpoint': 4.4.29 + '@smithy/middleware-retry': 4.5.0 + '@smithy/middleware-serde': 4.2.17 + '@smithy/middleware-stack': 4.2.13 + '@smithy/node-config-provider': 4.3.13 + '@smithy/node-http-handler': 4.5.2 + '@smithy/protocol-http': 5.3.13 + '@smithy/smithy-client': 4.12.9 + '@smithy/types': 4.14.0 + '@smithy/url-parser': 4.2.13 + '@smithy/util-base64': 4.3.2 + '@smithy/util-body-length-browser': 4.2.2 + '@smithy/util-body-length-node': 4.2.3 + '@smithy/util-defaults-mode-browser': 4.3.45 + '@smithy/util-defaults-mode-node': 4.2.49 + '@smithy/util-endpoints': 3.3.4 + '@smithy/util-middleware': 4.2.13 + '@smithy/util-retry': 4.3.0 + '@smithy/util-utf8': 4.2.2 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/client-sqs@3.1026.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 @@ -5664,13 +5910,13 @@ snapshots: '@jest/console@30.3.0': dependencies: '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 chalk: 4.1.2 jest-message-util: 30.3.0 jest-util: 30.3.0 slash: 3.0.0 - '@jest/core@30.3.0(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3))': + '@jest/core@30.3.0(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3))': dependencies: '@jest/console': 30.3.0 '@jest/pattern': 30.0.1 @@ -5678,49 +5924,14 @@ snapshots: '@jest/test-result': 30.3.0 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 ansi-escapes: 4.3.2 chalk: 4.1.2 ci-info: 4.4.0 exit-x: 0.2.2 graceful-fs: 4.2.11 jest-changed-files: 30.3.0 - jest-config: 
30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - jest-haste-map: 30.3.0 - jest-message-util: 30.3.0 - jest-regex-util: 30.0.1 - jest-resolve: 30.3.0 - jest-resolve-dependencies: 30.3.0 - jest-runner: 30.3.0 - jest-runtime: 30.3.0 - jest-snapshot: 30.3.0 - jest-util: 30.3.0 - jest-validate: 30.3.0 - jest-watcher: 30.3.0 - pretty-format: 30.3.0 - slash: 3.0.0 - transitivePeerDependencies: - - babel-plugin-macros - - esbuild-register - - supports-color - - ts-node - - '@jest/core@30.3.0(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3))': - dependencies: - '@jest/console': 30.3.0 - '@jest/pattern': 30.0.1 - '@jest/reporters': 30.3.0 - '@jest/test-result': 30.3.0 - '@jest/transform': 30.3.0 - '@jest/types': 30.3.0 - '@types/node': 25.5.0 - ansi-escapes: 4.3.2 - chalk: 4.1.2 - ci-info: 4.4.0 - exit-x: 0.2.2 - graceful-fs: 4.2.11 - jest-changed-files: 30.3.0 - jest-config: 30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + jest-config: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) jest-haste-map: 30.3.0 jest-message-util: 30.3.0 jest-regex-util: 30.0.1 @@ -5746,7 +5957,7 @@ snapshots: dependencies: '@jest/fake-timers': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 jest-mock: 30.3.0 '@jest/expect-utils@30.3.0': @@ -5764,7 +5975,7 @@ snapshots: dependencies: '@jest/types': 30.3.0 '@sinonjs/fake-timers': 15.3.2 - '@types/node': 25.5.0 + '@types/node': 25.6.0 jest-message-util: 30.3.0 jest-mock: 30.3.0 jest-util: 30.3.0 @@ -5782,7 +5993,7 @@ snapshots: '@jest/pattern@30.0.1': dependencies: - '@types/node': 25.5.0 + '@types/node': 25.6.0 jest-regex-util: 30.0.1 '@jest/reporters@30.3.0': @@ -5793,7 +6004,7 @@ snapshots: '@jest/transform': 30.3.0 '@jest/types': 30.3.0 '@jridgewell/trace-mapping': 0.3.31 - '@types/node': 25.5.0 + '@types/node': 25.6.0 chalk: 4.1.2 collect-v8-coverage: 1.0.3 exit-x: 0.2.2 @@ -5869,7 +6080,7 @@ snapshots: 
'@jest/schemas': 30.0.5 '@types/istanbul-lib-coverage': 2.0.6 '@types/istanbul-reports': 3.0.4 - '@types/node': 25.5.0 + '@types/node': 25.6.0 '@types/yargs': 17.0.35 chalk: 4.1.2 @@ -6068,6 +6279,12 @@ snapshots: '@pkgr/core@0.2.9': {} + '@redis/client@1.6.1': + dependencies: + cluster-key-slot: 1.1.2 + generic-pool: 3.9.0 + yallist: 4.0.0 + '@rtsao/scc@1.1.0': {} '@sinclair/typebox@0.34.49': {} @@ -6495,13 +6712,13 @@ snapshots: '@types/json5@0.0.29': {} - '@types/node@24.12.0': + '@types/node-forge@1.3.14': dependencies: - undici-types: 7.16.0 + '@types/node': 25.6.0 - '@types/node@25.5.0': + '@types/node@25.6.0': dependencies: - undici-types: 7.18.2 + undici-types: 7.19.2 '@types/stack-utils@2.0.3': {} @@ -6984,6 +7201,8 @@ snapshots: util: 0.12.5 uuid: 8.3.2 + cluster-key-slot@1.1.2: {} + co@4.6.0: {} collect-v8-coverage@1.0.3: {} @@ -7418,13 +7637,13 @@ snapshots: - eslint-import-resolver-webpack - supports-color - eslint-plugin-jest@29.15.2(@typescript-eslint/eslint-plugin@8.58.0(@typescript-eslint/parser@8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)))(typescript@5.9.3): + eslint-plugin-jest@29.15.2(@typescript-eslint/eslint-plugin@8.58.0(@typescript-eslint/parser@8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3): dependencies: '@typescript-eslint/utils': 8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) eslint: 9.39.4(jiti@2.6.1) optionalDependencies: '@typescript-eslint/eslint-plugin': 8.58.0(@typescript-eslint/parser@8.58.0(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3))(eslint@9.39.4(jiti@2.6.1))(typescript@5.9.3) - jest: 30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + jest: 
30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) typescript: 5.9.3 transitivePeerDependencies: - supports-color @@ -7704,6 +7923,12 @@ snapshots: optionalDependencies: picomatch: 4.0.4 + fengari@0.1.5: + dependencies: + readline-sync: 1.4.10 + sprintf-js: 1.1.3 + tmp: 0.2.5 + fflate@0.8.1: {} file-entry-cache@8.0.0: @@ -7768,6 +7993,8 @@ snapshots: generator-function@2.0.1: {} + generic-pool@3.9.0: {} + gensync@1.0.0-beta.2: {} get-caller-file@2.0.5: {} @@ -8121,7 +8348,7 @@ snapshots: '@jest/expect': 30.3.0 '@jest/test-result': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 chalk: 4.1.2 co: 4.6.0 dedent: 1.7.2 @@ -8141,34 +8368,15 @@ snapshots: - babel-plugin-macros - supports-color - jest-cli@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)): - dependencies: - '@jest/core': 30.3.0(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - '@jest/test-result': 30.3.0 - '@jest/types': 30.3.0 - chalk: 4.1.2 - exit-x: 0.2.2 - import-local: 3.2.0 - jest-config: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - jest-util: 30.3.0 - jest-validate: 30.3.0 - yargs: 17.7.2 - transitivePeerDependencies: - - '@types/node' - - babel-plugin-macros - - esbuild-register - - supports-color - - ts-node - - jest-cli@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)): + jest-cli@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)): dependencies: - '@jest/core': 30.3.0(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + '@jest/core': 30.3.0(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) '@jest/test-result': 30.3.0 '@jest/types': 30.3.0 chalk: 4.1.2 exit-x: 0.2.2 import-local: 3.2.0 - jest-config: 30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + jest-config: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) jest-util: 30.3.0 
jest-validate: 30.3.0 yargs: 17.7.2 @@ -8179,7 +8387,7 @@ snapshots: - supports-color - ts-node - jest-config@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)): + jest-config@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)): dependencies: '@babel/core': 7.29.0 '@jest/get-type': 30.1.0 @@ -8205,72 +8413,8 @@ snapshots: slash: 3.0.0 strip-json-comments: 3.1.1 optionalDependencies: - '@types/node': 24.12.0 - ts-node: 10.9.2(@types/node@24.12.0)(typescript@5.9.3) - transitivePeerDependencies: - - babel-plugin-macros - - supports-color - - jest-config@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)): - dependencies: - '@babel/core': 7.29.0 - '@jest/get-type': 30.1.0 - '@jest/pattern': 30.0.1 - '@jest/test-sequencer': 30.3.0 - '@jest/types': 30.3.0 - babel-jest: 30.3.0(@babel/core@7.29.0) - chalk: 4.1.2 - ci-info: 4.4.0 - deepmerge: 4.3.1 - glob: 10.5.0 - graceful-fs: 4.2.11 - jest-circus: 30.3.0 - jest-docblock: 30.2.0 - jest-environment-node: 30.3.0 - jest-regex-util: 30.0.1 - jest-resolve: 30.3.0 - jest-runner: 30.3.0 - jest-util: 30.3.0 - jest-validate: 30.3.0 - parse-json: 5.2.0 - pretty-format: 30.3.0 - slash: 3.0.0 - strip-json-comments: 3.1.1 - optionalDependencies: - '@types/node': 25.5.0 - ts-node: 10.9.2(@types/node@24.12.0)(typescript@5.9.3) - transitivePeerDependencies: - - babel-plugin-macros - - supports-color - - jest-config@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)): - dependencies: - '@babel/core': 7.29.0 - '@jest/get-type': 30.1.0 - '@jest/pattern': 30.0.1 - '@jest/test-sequencer': 30.3.0 - '@jest/types': 30.3.0 - babel-jest: 30.3.0(@babel/core@7.29.0) - chalk: 4.1.2 - ci-info: 4.4.0 - deepmerge: 4.3.1 - glob: 10.5.0 - graceful-fs: 4.2.11 - jest-circus: 30.3.0 - jest-docblock: 30.2.0 - jest-environment-node: 30.3.0 - jest-regex-util: 30.0.1 - jest-resolve: 30.3.0 - jest-runner: 30.3.0 - jest-util: 30.3.0 - 
jest-validate: 30.3.0 - parse-json: 5.2.0 - pretty-format: 30.3.0 - slash: 3.0.0 - strip-json-comments: 3.1.1 - optionalDependencies: - '@types/node': 25.5.0 - ts-node: 10.9.2(@types/node@25.5.0)(typescript@5.9.3) + '@types/node': 25.6.0 + ts-node: 10.9.2(@types/node@25.6.0)(typescript@5.9.3) transitivePeerDependencies: - babel-plugin-macros - supports-color @@ -8299,7 +8443,7 @@ snapshots: '@jest/environment': 30.3.0 '@jest/fake-timers': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 jest-mock: 30.3.0 jest-util: 30.3.0 jest-validate: 30.3.0 @@ -8307,7 +8451,7 @@ snapshots: jest-haste-map@30.3.0: dependencies: '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 anymatch: 3.1.3 fb-watchman: 2.0.2 graceful-fs: 4.2.11 @@ -8319,27 +8463,13 @@ snapshots: optionalDependencies: fsevents: 2.3.3 - jest-html-reporter@4.4.0(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3))): + jest-html-reporter@4.4.0(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3))): dependencies: '@jest/reporters': 30.3.0 '@jest/test-result': 30.3.0 '@jest/types': 30.3.0 dateformat: 3.0.2 - jest: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - mkdirp: 1.0.4 - strip-ansi: 6.0.1 - xmlbuilder: 15.0.0 - transitivePeerDependencies: - - node-notifier - - supports-color - - jest-html-reporter@4.4.0(jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3))): - dependencies: - '@jest/reporters': 30.3.0 - '@jest/test-result': 30.3.0 - '@jest/types': 30.3.0 - dateformat: 3.0.2 - jest: 30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + jest: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) mkdirp: 1.0.4 strip-ansi: 6.0.1 xmlbuilder: 15.0.0 @@ -8374,7 +8504,7 @@ snapshots: jest-mock@30.3.0: dependencies: '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 
jest-util: 30.3.0 jest-pnp-resolver@1.2.3(jest-resolve@30.3.0): @@ -8408,7 +8538,7 @@ snapshots: '@jest/test-result': 30.3.0 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 chalk: 4.1.2 emittery: 0.13.1 exit-x: 0.2.2 @@ -8437,7 +8567,7 @@ snapshots: '@jest/test-result': 30.3.0 '@jest/transform': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 chalk: 4.1.2 cjs-module-lexer: 2.2.0 collect-v8-coverage: 1.0.3 @@ -8484,7 +8614,7 @@ snapshots: jest-util@30.3.0: dependencies: '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 chalk: 4.1.2 ci-info: 4.4.0 graceful-fs: 4.2.11 @@ -8503,7 +8633,7 @@ snapshots: dependencies: '@jest/test-result': 30.3.0 '@jest/types': 30.3.0 - '@types/node': 25.5.0 + '@types/node': 25.6.0 ansi-escapes: 4.3.2 chalk: 4.1.2 emittery: 0.13.1 @@ -8512,31 +8642,18 @@ snapshots: jest-worker@30.3.0: dependencies: - '@types/node': 25.5.0 + '@types/node': 25.6.0 '@ungap/structured-clone': 1.3.0 jest-util: 30.3.0 merge-stream: 2.0.0 supports-color: 8.1.1 - jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)): - dependencies: - '@jest/core': 30.3.0(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - '@jest/types': 30.3.0 - import-local: 3.2.0 - jest-cli: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - transitivePeerDependencies: - - '@types/node' - - babel-plugin-macros - - esbuild-register - - supports-color - - ts-node - - jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)): + jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)): dependencies: - '@jest/core': 30.3.0(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + '@jest/core': 30.3.0(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) '@jest/types': 30.3.0 import-local: 3.2.0 - jest-cli: 
30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + jest-cli: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) transitivePeerDependencies: - '@types/node' - babel-plugin-macros @@ -8719,6 +8836,8 @@ snapshots: object.entries: 1.1.9 semver: 6.3.1 + node-forge@1.4.0: {} + node-int64@0.4.0: {} node-releases@2.0.36: {} @@ -8972,6 +9091,8 @@ snapshots: react-is@19.0.0: {} + readline-sync@1.4.10: {} + real-require@0.2.0: {} refa@0.12.1: @@ -9164,6 +9285,8 @@ snapshots: sprintf-js@1.0.3: {} + sprintf-js@1.1.3: {} + stable-hash-x@0.2.0: {} stack-utils@2.0.6: @@ -9303,6 +9426,8 @@ snapshots: fdir: 6.5.0(picomatch@4.0.4) picomatch: 4.0.4 + tmp@0.2.5: {} + tmpl@1.0.5: {} to-regex-range@5.0.1: @@ -9322,33 +9447,12 @@ snapshots: picomatch: 4.0.4 typescript: 5.9.3 - ts-jest@29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)))(typescript@5.9.3): - dependencies: - bs-logger: 0.2.6 - fast-json-stable-stringify: 2.1.0 - handlebars: 4.7.9 - jest: 30.3.0(@types/node@24.12.0)(ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3)) - json5: 2.2.3 - lodash.memoize: 4.1.2 - make-error: 1.3.6 - semver: 7.7.4 - type-fest: 4.41.0 - typescript: 5.9.3 - yargs-parser: 21.1.1 - optionalDependencies: - '@babel/core': 7.29.0 - '@jest/transform': 30.3.0 - '@jest/types': 30.3.0 - babel-jest: 30.3.0(@babel/core@7.29.0) - esbuild: 0.28.0 - jest-util: 30.3.0 - - ts-jest@29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)))(typescript@5.9.3): + 
ts-jest@29.4.9(@babel/core@7.29.0)(@jest/transform@30.3.0)(@jest/types@30.3.0)(babel-jest@30.3.0(@babel/core@7.29.0))(esbuild@0.28.0)(jest-util@30.3.0)(jest@30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)))(typescript@5.9.3): dependencies: bs-logger: 0.2.6 fast-json-stable-stringify: 2.1.0 handlebars: 4.7.9 - jest: 30.3.0(@types/node@25.5.0)(ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3)) + jest: 30.3.0(@types/node@25.6.0)(ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3)) json5: 2.2.3 lodash.memoize: 4.1.2 make-error: 1.3.6 @@ -9364,33 +9468,14 @@ snapshots: esbuild: 0.28.0 jest-util: 30.3.0 - ts-node@10.9.2(@types/node@24.12.0)(typescript@5.9.3): + ts-node@10.9.2(@types/node@25.6.0)(typescript@5.9.3): dependencies: '@cspotcode/source-map-support': 0.8.1 '@tsconfig/node10': 1.0.12 '@tsconfig/node12': 1.0.11 '@tsconfig/node14': 1.0.3 '@tsconfig/node16': 1.0.4 - '@types/node': 24.12.0 - acorn: 8.16.0 - acorn-walk: 8.3.5 - arg: 4.1.3 - create-require: 1.1.1 - diff: 4.0.4 - make-error: 1.3.6 - typescript: 5.9.3 - v8-compile-cache-lib: 3.0.1 - yn: 3.1.1 - optional: true - - ts-node@10.9.2(@types/node@25.5.0)(typescript@5.9.3): - dependencies: - '@cspotcode/source-map-support': 0.8.1 - '@tsconfig/node10': 1.0.12 - '@tsconfig/node12': 1.0.11 - '@tsconfig/node14': 1.0.3 - '@tsconfig/node16': 1.0.4 - '@types/node': 25.5.0 + '@types/node': 25.6.0 acorn: 8.16.0 acorn-walk: 8.3.5 arg: 4.1.3 @@ -9487,9 +9572,7 @@ snapshots: has-symbols: 1.1.0 which-boxed-primitive: 1.1.1 - undici-types@7.16.0: {} - - undici-types@7.18.2: {} + undici-types@7.19.2: {} unrs-resolver@1.11.1: dependencies: @@ -9639,6 +9722,8 @@ snapshots: yallist@3.1.1: {} + yallist@4.0.0: {} + yaml@2.8.3: {} yargs-parser@20.2.9: {} diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index cc1bdeb4..c343e4f9 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -6,12 +6,22 @@ packages: - "tools/*" blockExoticSubdeps: true +overrides: + collect-v8-coverage: 
"^1.0.3" + "pretty-format>react-is": "19.0.0" + flatted: "^3.4.0" + fast-xml-parser: "^5.5.6" + "ts-jest>handlebars": "^4.7.9" + catalogs: app: + "@redis/client": "^1.5.14" async-wait-until: "^2.0.31" aws-embedded-metrics: "^4.2.1" cloudevents: "^10.0.0" + node-forge: "^1.3.1" p-map: "^4.0.0" + picocolors: "^1.1.1" pino: "^10.3.1" table: "^6.9.0" yargs: "^17.7.2" @@ -20,10 +30,13 @@ catalogs: "@aws-sdk/client-cloudwatch": "^3.1025.0" "@aws-sdk/client-cloudwatch-logs": "^3.1023.0" "@aws-sdk/client-s3": "^3.1024.0" + "@aws-sdk/client-secrets-manager": "^3.1023.0" "@aws-sdk/client-sqs": "^3.1023.0" "@aws-sdk/client-ssm": "^3.1025.0" + "@aws-crypto/sha256-js": "^5.2.0" "@aws-sdk/client-sts": "^3.1023.0" "@aws-sdk/credential-providers": "^3.1023.0" + "@smithy/signature-v4": "^5.0.0" lint: "@eslint/js": "^9.39.4" "@stylistic/eslint-plugin": "^5.10.0" @@ -53,7 +66,8 @@ catalogs: tools: "@tsconfig/node22": "^22.0.5" "@types/aws-lambda": "^8.10.161" - "@types/node": "^24.12.0" + "@types/node": "^25.5.0" + "@types/node-forge": "^1.3.11" "@types/yargs": "^17.0.24" esbuild: "^0.28.0" knip: "^6.3.1" diff --git a/scripts/config/pre-commit.yaml b/scripts/config/pre-commit.yaml index a7619797..221b38f1 100644 --- a/scripts/config/pre-commit.yaml +++ b/scripts/config/pre-commit.yaml @@ -8,6 +8,7 @@ repos: - id: check-added-large-files - id: check-symlinks - id: detect-private-key + exclude: 'lambdas/https-client-lambda/src/__tests__/tls-agent-factory\.test\.ts' - id: end-of-file-fixer - id: forbid-new-submodules - id: mixed-line-ending @@ -78,3 +79,10 @@ repos: entry: pnpm exec knip --no-progress language: system pass_filenames: false + - repo: local + hooks: + - id: check-lua-format + name: Check Lua format + entry: /usr/bin/env check=branch ./scripts/githooks/check-lua-format.sh + language: script + pass_filenames: false diff --git a/scripts/config/sonar-scanner.properties b/scripts/config/sonar-scanner.properties index b9013bf3..6d5a3f2d 100644 --- 
a/scripts/config/sonar-scanner.properties +++ b/scripts/config/sonar-scanner.properties @@ -7,3 +7,9 @@ sonar.terraform.provider.aws.version=5.54.1 sonar.cpd.exclusions=**.test.*, src/models/** sonar.coverage.exclusions=tests/test-support/**, tests/**, lambdas/**/src/__tests__/**, src/**/src/__tests__/**, src/models/**, scripts/**/src/__tests__/**, tools/**/src/__tests__/**, **/jest.config.*, **/knip.ts sonar.javascript.lcov.reportPaths=lcov.info + +# typescript:S4325 flags non-null/type assertions as "unnecessary" based on a different +# interpretation than our own strict typechecking enforces, causing conflicting lint loops. +sonar.issue.ignore.multicriteria=e1 +sonar.issue.ignore.multicriteria.e1.ruleKey=typescript:S4325 +sonar.issue.ignore.multicriteria.e1.resourceKey=** diff --git a/scripts/config/vale/styles/config/vocabularies/words/accept.txt b/scripts/config/vale/styles/config/vocabularies/words/accept.txt index ee5b597c..535b5e1d 100644 --- a/scripts/config/vale/styles/config/vocabularies/words/accept.txt +++ b/scripts/config/vale/styles/config/vocabularies/words/accept.txt @@ -1,4 +1,5 @@ ajv +APIs asdf auditability Bitwarden @@ -22,16 +23,18 @@ Grype idempotence Jira namespace +NFRs npm OAuth Octokit onboarding +pnpm Podman Python queryable rawContent read_file -repo +[rR][eE][pP][oO] [Rr]unbook sed Syft @@ -39,3 +42,4 @@ teardown Terraform toolchain Trufflehog +validators diff --git a/scripts/githooks/check-lua-format.sh b/scripts/githooks/check-lua-format.sh new file mode 100755 index 00000000..ec8fdf75 --- /dev/null +++ b/scripts/githooks/check-lua-format.sh @@ -0,0 +1,100 @@ +#!/bin/bash + +set -euo pipefail + +# Pre-commit git hook to lint Lua files using luacheck. Runs luacheck natively +# if installed, otherwise falls back to Docker. 
+# +# Usage: +# $ [options] ./check-lua-format.sh +# +# Options: +# check={all,staged-changes,working-tree-changes,branch} # Check mode, default is 'working-tree-changes' +# BRANCH_NAME=other-branch-than-main # Branch to compare with, default is `origin/main` +# FORCE_USE_DOCKER=true # If set to true the command is run in a Docker container, default is 'false' +# VERBOSE=true # Show all the executed commands, default is `false` + +# ============================================================================== + +function main() { + + cd "$(git rev-parse --show-toplevel)" + + check=${check:-working-tree-changes} + case $check in + "all") + files="$(git ls-files "*.lua")" + ;; + "staged-changes") + files="$(git diff --diff-filter=ACMRT --name-only --cached "*.lua")" + ;; + "working-tree-changes") + files="$(git diff --diff-filter=ACMRT --name-only "*.lua")" + ;; + "branch") + files="$( (git diff --diff-filter=ACMRT --name-only "${BRANCH_NAME:-origin/main}" "*.lua"; git diff --name-only "*.lua") | sort | uniq )" + ;; + *) + echo "Unrecognised check mode: $check" >&2 && exit 1 + ;; + esac + + if [ -n "$files" ]; then + # shellcheck disable=SC2155 + local globals=$(jq -r '.diagnostics.globals[]' .luarc.json | tr '\n' ' ') + if command -v luacheck > /dev/null 2>&1 && ! is-arg-true "${FORCE_USE_DOCKER:-false}"; then + files="$files" globals="$globals" run-luacheck-natively + else + files="$files" globals="$globals" run-luacheck-in-docker + fi + fi +} + +# Run luacheck natively. +# Arguments (provided as environment variables): +# files=[files to check] +# globals=[space-separated list of global names] +function run-luacheck-natively() { + + # shellcheck disable=SC2086 + luacheck $files --globals $globals +} + +# Run luacheck in a Docker container. 
+# Arguments (provided as environment variables): +# files=[files to check] +# globals=[space-separated list of global names] +function run-luacheck-in-docker() { + + # shellcheck disable=SC1091 + source ./scripts/docker/docker.lib.sh + + # shellcheck disable=SC2155 + local image=$(name=pipelinecomponents/luacheck docker-get-image-version-and-pull) + # shellcheck disable=SC2086 + docker run --rm --platform linux/amd64 \ + --volume "$PWD":/data \ + --workdir /data \ + --entrypoint luacheck \ + "$image" \ + $files --globals $globals +} + +# ============================================================================== + +function is-arg-true() { + + if [[ "$1" =~ ^(true|yes|y|on|1|TRUE|YES|Y|ON)$ ]]; then + return 0 + else + return 1 + fi +} + +# ============================================================================== + +is-arg-true "${VERBOSE:-false}" && set -x + +main "$@" + +exit 0 diff --git a/scripts/tests/integration-debug.sh b/scripts/tests/integration-debug.sh index ac9fb905..a4ebbd63 100755 --- a/scripts/tests/integration-debug.sh +++ b/scripts/tests/integration-debug.sh @@ -12,16 +12,20 @@ set -euo pipefail # Actions: # queue-status Show SQS queue message counts # queue-peek Peek one message from each SQS queue -# tail-transform Tail client-transform-filter lambda logs -# tail-webhook Tail mock-webhook lambda logs -# tail-pipe Tail EventBridge pipe log group -# pipe-state Show EventBridge pipe state and recent metrics +# tail-transform Tail client-transform-filter lambda logs +# tail-https-client Tail https-client lambda logs (requires CLIENT_ID) +# tail-webhook Tail mock-webhook lambda logs +# tail-pipe Tail EventBridge pipe log group +# pipe-state Show EventBridge pipe state and recent metrics # # Required: # ENVIRONMENT # AWS_PROFILE # ACTION # +# Required for queue-status, queue-peek, tail-https-client: +# CLIENT_ID Client ID (e.g. 
mock-client-1) +# # Optional: # LOG_FILTER CloudWatch Logs filter pattern / text # AWS_REGION (default: eu-west-2) @@ -45,7 +49,7 @@ fi REGION="${AWS_REGION:-eu-west-2}" LOG_FILTER="${LOG_FILTER:-}" -SUBSCRIPTION_FIXTURE_PATH="${SUBSCRIPTION_FIXTURE_PATH:-tests/integration/fixtures/subscriptions/mock-client-1.json}" +CLIENT_ID="${CLIENT_ID:-}" if ! aws sts get-caller-identity --profile "$AWS_PROFILE" >/dev/null 2>&1; then echo "No active AWS SSO session for profile '$AWS_PROFILE'. Running aws sso login..." @@ -69,21 +73,12 @@ queue_url() { echo "https://sqs.${REGION}.amazonaws.com/${ACCOUNT_ID}/${queue_name}" } -target_dlq_queue_name() { - local target_id - - if [ ! -f "$SUBSCRIPTION_FIXTURE_PATH" ]; then - echo "Error: subscription fixture not found: $SUBSCRIPTION_FIXTURE_PATH" >&2 - exit 1 - fi - - target_id="$(jq -r '.targets[0].targetId // empty' "$SUBSCRIPTION_FIXTURE_PATH")" - if [ -z "$target_id" ]; then - echo "Error: unable to read targets[0].targetId from $SUBSCRIPTION_FIXTURE_PATH" >&2 +require_client_id() { + if [ -z "$CLIENT_ID" ]; then + echo "Error: CLIENT_ID must be set for this action." 
>&2 + echo "Example: CLIENT_ID=mock-client-single-target ENVIRONMENT= AWS_PROFILE= make test-integration-debug ACTION=queue-status" >&2 exit 1 fi - - echo "${PREFIX}-${target_id}-dlq-queue" } show_queue_counts() { @@ -101,9 +96,11 @@ } action_queue_status() { - show_queue_counts "Mock Target DLQ - Queue Message Counts" "$(target_dlq_queue_name)" - show_queue_counts "Inbound Event Queue - Queue Message Counts" "${PREFIX}-inbound-event-queue" - show_queue_counts "Inbound Event DLQ - Queue Message Counts" "${PREFIX}-inbound-event-dlq" + require_client_id + show_queue_counts "Client Delivery Queue - Message Counts" "${PREFIX}-${CLIENT_ID}-delivery-queue" + show_queue_counts "Client Delivery DLQ - Message Counts" "${PREFIX}-${CLIENT_ID}-delivery-dlq-queue" + show_queue_counts "Inbound Event Queue - Message Counts" "${PREFIX}-inbound-event-queue" + show_queue_counts "Inbound Event DLQ - Message Counts" "${PREFIX}-inbound-event-dlq" } peek_queue_message() { @@ -128,21 +125,19 @@ } action_queue_peek() { - peek_queue_message "Mock Target DLQ - Message Peek" "$(target_dlq_queue_name)" + require_client_id + peek_queue_message "Client Delivery Queue - Message Peek" "${PREFIX}-${CLIENT_ID}-delivery-queue" + peek_queue_message "Client Delivery DLQ - Message Peek" "${PREFIX}-${CLIENT_ID}-delivery-dlq-queue" peek_queue_message "Inbound Event Queue - Message Peek" "${PREFIX}-inbound-event-queue" peek_queue_message "Inbound Event DLQ - Message Peek" "${PREFIX}-inbound-event-dlq" } log_filter_args() { - local -a args=() - local escaped_log_filter if [[ -n "$LOG_FILTER" ]]; then - escaped_log_filter="${LOG_FILTER//\"/\\\"}" + local escaped_log_filter="${LOG_FILTER//\"/\\\"}" # CloudWatch filter patterns treat quoted strings as exact phrases. 
- args+=(--filter-pattern "\"$escaped_log_filter\"") + printf '%s\n' --filter-pattern "\"$escaped_log_filter\"" fi - - printf '%s\n' "${args[@]}" } action_tail_transform() { @@ -160,6 +155,22 @@ action_tail_transform() { "${filter_args[@]}" } +action_tail_https_client() { + require_client_id + local -a filter_args=() + mapfile -t filter_args < <(log_filter_args) + + print_section "HTTPS Client Lambda Logs" + aws logs tail \ + "/aws/lambda/${PREFIX}-https-client-${CLIENT_ID}" \ + --region "$REGION" \ + --profile "$AWS_PROFILE" \ + --since 30m \ + --follow \ + --format short \ + "${filter_args[@]}" +} + action_tail_webhook() { local -a filter_args=() mapfile -t filter_args < <(log_filter_args) @@ -266,6 +277,9 @@ case "$ACTION" in tail-transform) action_tail_transform ;; + tail-https-client) + action_tail_https_client + ;; tail-webhook) action_tail_webhook ;; @@ -277,7 +291,7 @@ case "$ACTION" in ;; *) echo "Unknown action: $ACTION" >&2 - echo "Actions: queue-status, queue-peek, tail-transform, tail-webhook, tail-pipe, pipe-state" >&2 + echo "Actions: queue-status, queue-peek, tail-transform, tail-https-client, tail-webhook, tail-pipe, pipe-state" >&2 exit 1 ;; esac diff --git a/scripts/tests/integration-env.sh b/scripts/tests/integration-env.sh index cd5ff1a8..9a889902 100644 --- a/scripts/tests/integration-env.sh +++ b/scripts/tests/integration-env.sh @@ -7,8 +7,12 @@ set -euo pipefail # Add new clients here: "fixture-filename.json:ENV_VAR_PREFIX" CLIENTS=( - "mock-client-1.json:MOCK_CLIENT" - "mock-client-2.json:MOCK_CLIENT_2" + "mock-client-single-target.json:MOCK_CLIENT" + "mock-client-fan-out.json:MOCK_CLIENT_FAN_OUT" + "mock-client-mtls.json:MOCK_CLIENT_MTLS" + "mock-client-rate-limit.json:MOCK_CLIENT_RATE_LIMIT" + "mock-client-circuit-breaker.json:MOCK_CLIENT_CIRCUIT_BREAKER" + "mock-client-short-retry.json:MOCK_CLIENT_SHORT_RETRY" ) for CLIENT_ENTRY in "${CLIENTS[@]}"; do diff --git a/scripts/tests/lua-lint.sh b/scripts/tests/lua-lint.sh new file mode 100755 
index 00000000..ae271da4 --- /dev/null +++ b/scripts/tests/lua-lint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +set -euo pipefail + +cd "$(git rev-parse --show-toplevel)" + +check=all ./scripts/githooks/check-lua-format.sh diff --git a/scripts/tests/test.mk b/scripts/tests/test.mk index d9303d92..2bb70740 100644 --- a/scripts/tests/test.mk +++ b/scripts/tests/test.mk @@ -14,6 +14,9 @@ test-unit: # Run your unit tests from scripts/test/unit @Testing test-lint: # Lint your code from scripts/test/lint @Testing make _test name="lint" +test-lua-lint: # Lint Lua scripts @Testing + make _test name="lua-lint" + test-typecheck: # Typecheck your code from scripts/test/typecheck @Testing make _test name="typecheck" @@ -35,7 +38,7 @@ test-integration-local: # Run integration tests locally against a remoptely depl test-integration-debug: # Debug a live environment - inspect queues, tail logs, check pipe state (requires ENVIRONMENT, AWS_PROFILE, ACTION) @Testing make _test name="integration-debug" ACTION="$(or $(ACTION),$(word 2,$(MAKECMDGOALS)))" -queue-status queue-peek tail-transform tail-webhook tail-pipe pipe-state: +queue-status queue-peek tail-transform tail-https-client tail-webhook tail-pipe pipe-state: @: test-load: # Run all your load tests @Testing diff --git a/src/config-cache/jest.config.ts b/src/config-cache/jest.config.ts new file mode 100644 index 00000000..6ecf333b --- /dev/null +++ b/src/config-cache/jest.config.ts @@ -0,0 +1,14 @@ +import { nodeJestConfig } from "../../jest.config.base.ts"; + +export default { + ...nodeJestConfig, + coverageThreshold: { + global: { + ...nodeJestConfig.coverageThreshold?.global, + branches: 100, + functions: 100, + lines: 100, + statements: 100, + }, + }, +}; diff --git a/src/config-cache/package.json b/src/config-cache/package.json new file mode 100644 index 00000000..61bf815f --- /dev/null +++ b/src/config-cache/package.json @@ -0,0 +1,32 @@ +{ + "exports": { + ".": { + "types": "./src/index.ts", + "default": "./src/index.ts" + } + 
}, + "dependencies": { + "@nhs-notify-client-callbacks/models": "workspace:*" + }, + "devDependencies": { + "@tsconfig/node22": "catalog:tools", + "@types/jest": "catalog:test", + "@types/node": "catalog:tools", + "eslint": "catalog:lint", + "jest": "catalog:test", + "ts-jest": "catalog:test", + "typescript": "catalog:tools" + }, + "engines": { + "node": ">=24.14.1" + }, + "name": "@nhs-notify-client-callbacks/config-cache", + "private": true, + "scripts": { + "lint": "eslint .", + "lint:fix": "eslint . --fix", + "test:unit": "jest", + "typecheck": "tsc --noEmit" + }, + "version": "0.0.1" +} diff --git a/src/config-cache/src/__tests__/config-cache.test.ts b/src/config-cache/src/__tests__/config-cache.test.ts new file mode 100644 index 00000000..179a178a --- /dev/null +++ b/src/config-cache/src/__tests__/config-cache.test.ts @@ -0,0 +1,75 @@ +import type { ClientSubscriptionConfiguration } from "@nhs-notify-client-callbacks/models"; +import { ConfigCache } from "config-cache"; + +const createConfig = (clientId: string): ClientSubscriptionConfiguration => ({ + clientId, + subscriptions: [], + targets: [], +}); + +describe("ConfigCache", () => { + it("stores and retrieves configuration", () => { + const cache = new ConfigCache(60_000); + const config = createConfig("client-1"); + + cache.set("client-1", config); + + expect(cache.get("client-1")).toEqual(config); + }); + + it("returns undefined for non-existent key", () => { + const cache = new ConfigCache(60_000); + + expect(cache.get("non-existent")).toBeUndefined(); + }); + + it("returns cached value without re-fetch when within TTL", () => { + jest.useFakeTimers(); + jest.setSystemTime(new Date("2026-01-01T10:00:00Z")); + + const cache = new ConfigCache(5000); + const config = createConfig("client-1"); + + cache.set("client-1", config); + + jest.advanceTimersByTime(4999); + + expect(cache.get("client-1")).toEqual(config); + + jest.useRealTimers(); + }); + + it("returns undefined for expired entries after TTL", () 
=> { + jest.useFakeTimers(); + jest.setSystemTime(new Date("2026-01-01T10:00:00Z")); + + const cache = new ConfigCache(1000); + const config = createConfig("client-1"); + + cache.set("client-1", config); + expect(cache.get("client-1")).toEqual(config); + + jest.advanceTimersByTime(1001); + + expect(cache.get("client-1")).toBeUndefined(); + + jest.useRealTimers(); + }); + + it("clears all entries", () => { + const cache = new ConfigCache(60_000); + const configA = createConfig("client-a"); + const configB = createConfig("client-b"); + + cache.set("client-a", configA); + cache.set("client-b", configB); + + expect(cache.get("client-a")).toEqual(configA); + expect(cache.get("client-b")).toEqual(configB); + + cache.clear(); + + expect(cache.get("client-a")).toBeUndefined(); + expect(cache.get("client-b")).toBeUndefined(); + }); +}); diff --git a/lambdas/client-transform-filter-lambda/src/services/config-cache.ts b/src/config-cache/src/config-cache.ts similarity index 100% rename from lambdas/client-transform-filter-lambda/src/services/config-cache.ts rename to src/config-cache/src/config-cache.ts diff --git a/src/config-cache/src/index.ts b/src/config-cache/src/index.ts new file mode 100644 index 00000000..1da1a0f1 --- /dev/null +++ b/src/config-cache/src/index.ts @@ -0,0 +1 @@ +export { ConfigCache } from "./config-cache"; diff --git a/src/config-cache/tsconfig.json b/src/config-cache/tsconfig.json new file mode 100644 index 00000000..a50e6fc0 --- /dev/null +++ b/src/config-cache/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "isolatedModules": true, + "paths": { + "*": [ + "./src/*" + ] + } + }, + "extends": "../../tsconfig.base.json", + "include": [ + "src/**/*" + ] +} diff --git a/src/config-subscription-cache/jest.config.ts b/src/config-subscription-cache/jest.config.ts new file mode 100644 index 00000000..6ecf333b --- /dev/null +++ b/src/config-subscription-cache/jest.config.ts @@ -0,0 +1,14 @@ +import { nodeJestConfig } from 
"../../jest.config.base.ts"; + +export default { + ...nodeJestConfig, + coverageThreshold: { + global: { + ...nodeJestConfig.coverageThreshold?.global, + branches: 100, + functions: 100, + lines: 100, + statements: 100, + }, + }, +}; diff --git a/src/config-subscription-cache/package.json b/src/config-subscription-cache/package.json new file mode 100644 index 00000000..c7bd0be5 --- /dev/null +++ b/src/config-subscription-cache/package.json @@ -0,0 +1,34 @@ +{ + "exports": { + ".": { + "types": "./src/index.ts", + "default": "./src/index.ts" + } + }, + "dependencies": { + "@aws-sdk/client-s3": "catalog:aws", + "@nhs-notify-client-callbacks/logger": "workspace:*", + "@nhs-notify-client-callbacks/models": "workspace:*" + }, + "devDependencies": { + "@tsconfig/node22": "catalog:tools", + "@types/jest": "catalog:test", + "@types/node": "catalog:tools", + "eslint": "catalog:lint", + "jest": "catalog:test", + "ts-jest": "catalog:test", + "typescript": "catalog:tools" + }, + "engines": { + "node": ">=24.14.1" + }, + "name": "@nhs-notify-client-callbacks/config-subscription-cache", + "private": true, + "scripts": { + "lint": "eslint .", + "lint:fix": "eslint . 
--fix", + "test:unit": "jest", + "typecheck": "tsc --noEmit" + }, + "version": "0.0.1" +} diff --git a/src/config-subscription-cache/src/__tests__/config-subscription-cache.test.ts b/src/config-subscription-cache/src/__tests__/config-subscription-cache.test.ts new file mode 100644 index 00000000..053b2398 --- /dev/null +++ b/src/config-subscription-cache/src/__tests__/config-subscription-cache.test.ts @@ -0,0 +1,157 @@ +import { GetObjectCommand, NoSuchKey } from "@aws-sdk/client-s3"; +import { ConfigSubscriptionCache } from "config-subscription-cache"; + +const mockS3Send = jest.fn(); +jest.mock("@aws-sdk/client-s3", () => { + const actual = jest.requireActual("@aws-sdk/client-s3"); + return { + ...actual, + S3Client: jest.fn().mockImplementation(() => ({ + send: (...args: unknown[]) => mockS3Send(...args), + })), + }; +}); + +jest.mock("@nhs-notify-client-callbacks/logger", () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + debug: jest.fn(), + }, +})); + +const VALID_CONFIG = { + clientId: "client-1", + subscriptions: [], + targets: [ + { + targetId: "target-1", + type: "API", + invocationEndpoint: "https://webhook.example.invalid", + invocationMethod: "POST", + invocationRateLimit: 10, + apiKey: { headerName: "x-api-key", headerValue: "secret" }, + }, + ], +}; + +const makeS3Response = (body: unknown) => ({ + Body: { + transformToString: jest.fn().mockResolvedValue(JSON.stringify(body)), + }, +}); + +const createCache = (ttlMs = 1000) => { + const { S3Client } = jest.requireMock("@aws-sdk/client-s3"); + return new ConfigSubscriptionCache({ + s3Client: new S3Client(), + bucketName: "test-bucket", + keyPrefix: "client_subscriptions/", + ttlMs, + }); +}; + +describe("ConfigSubscriptionCache", () => { + beforeEach(() => { + mockS3Send.mockReset(); + }); + + it("loads and parses valid config from S3", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + const cache = createCache(); + + const result = await 
cache.loadClientConfig("client-1"); + + expect(result).toEqual(VALID_CONFIG); + expect(mockS3Send).toHaveBeenCalledTimes(1); + expect(mockS3Send.mock.calls[0][0]).toBeInstanceOf(GetObjectCommand); + }); + + it("uses the configured key prefix for S3 requests", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + const cache = createCache(); + + await cache.loadClientConfig("client-1"); + + const command: GetObjectCommand = mockS3Send.mock.calls[0][0]; + expect(command.input.Key).toBe("client_subscriptions/client-1.json"); + expect(command.input.Bucket).toBe("test-bucket"); + }); + + it("returns cached config on subsequent calls", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + const cache = createCache(); + + await cache.loadClientConfig("client-1"); + await cache.loadClientConfig("client-1"); + + expect(mockS3Send).toHaveBeenCalledTimes(1); + }); + + it("re-fetches from S3 after TTL expiry", async () => { + jest.useFakeTimers(); + jest.setSystemTime(new Date("2026-01-01T10:00:00Z")); + + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + const cache = createCache(1000); + + await cache.loadClientConfig("client-1"); + + jest.advanceTimersByTime(1001); + + await cache.loadClientConfig("client-1"); + + expect(mockS3Send).toHaveBeenCalledTimes(2); + + jest.useRealTimers(); + }); + + it("returns undefined when S3 key does not exist", async () => { + mockS3Send.mockRejectedValue(new NoSuchKey({ $metadata: {}, message: "" })); + const cache = createCache(); + + const result = await cache.loadClientConfig("missing-client"); + + expect(result).toBeUndefined(); + }); + + it("throws when config fails validation", async () => { + const invalidConfig = { ...VALID_CONFIG, targets: [{ invalid: true }] }; + mockS3Send.mockResolvedValue(makeS3Response(invalidConfig)); + const cache = createCache(); + + await expect(cache.loadClientConfig("client-1")).rejects.toThrow( + "Invalid client config for 'client-1'", + ); + 
}); + + it("throws when S3 body is empty", async () => { + mockS3Send.mockResolvedValue({ Body: undefined }); + const cache = createCache(); + + await expect(cache.loadClientConfig("client-1")).rejects.toThrow( + "S3 response body was empty for client 'client-1'", + ); + }); + + it("propagates non-NoSuchKey S3 errors", async () => { + mockS3Send.mockRejectedValue(new Error("S3 access denied")); + const cache = createCache(); + + await expect(cache.loadClientConfig("client-1")).rejects.toThrow( + "S3 access denied", + ); + }); + + it("clears cache on reset", async () => { + mockS3Send.mockResolvedValue(makeS3Response(VALID_CONFIG)); + const cache = createCache(); + + await cache.loadClientConfig("client-1"); + cache.reset(); + await cache.loadClientConfig("client-1"); + + expect(mockS3Send).toHaveBeenCalledTimes(2); + }); +}); diff --git a/src/config-subscription-cache/src/config-subscription-cache.ts b/src/config-subscription-cache/src/config-subscription-cache.ts new file mode 100644 index 00000000..0ce3547c --- /dev/null +++ b/src/config-subscription-cache/src/config-subscription-cache.ts @@ -0,0 +1,110 @@ +import { GetObjectCommand, NoSuchKey, S3Client } from "@aws-sdk/client-s3"; +import type { ClientSubscriptionConfiguration } from "@nhs-notify-client-callbacks/models"; +import { parseClientSubscriptionConfiguration } from "@nhs-notify-client-callbacks/models"; +import { logger } from "@nhs-notify-client-callbacks/logger"; + +type CacheEntry = { + value: ClientSubscriptionConfiguration; + expiresAt: number; +}; + +export type ConfigSubscriptionCacheOptions = { + s3Client: S3Client; + bucketName: string; + keyPrefix: string; + ttlMs: number; +}; + +export class ConfigSubscriptionCache { + private readonly cache = new Map(); + + private readonly s3Client: S3Client; + + private readonly bucketName: string; + + private readonly keyPrefix: string; + + private readonly ttlMs: number; + + constructor(options: ConfigSubscriptionCacheOptions) { + this.s3Client = 
options.s3Client; + this.bucketName = options.bucketName; + this.keyPrefix = options.keyPrefix; + this.ttlMs = options.ttlMs; + } + + async loadClientConfig( + clientId: string, + ): Promise { + const cached = this.getCached(clientId); + if (cached) { + return cached; + } + + const raw = await this.fetchFromS3(clientId); + if (raw === undefined) { + return undefined; + } + + const parsed = JSON.parse(raw) as unknown; + const result = parseClientSubscriptionConfiguration(parsed); + + if (!result.success) { + throw new Error( + `Invalid client config for '${clientId}': ${result.error.message}`, + ); + } + + this.cache.set(clientId, { + value: result.data, + expiresAt: Date.now() + this.ttlMs, + }); + + logger.info("Client config loaded from S3", { clientId }); + return result.data; + } + + reset(): void { + this.cache.clear(); + } + + // eslint-disable-next-line sonarjs/function-return-type -- cache lookup returns T | undefined + private getCached( + clientId: string, + ): ClientSubscriptionConfiguration | undefined { + const entry = this.cache.get(clientId); + + if (entry && entry.expiresAt <= Date.now()) { + this.cache.delete(clientId); + return undefined; + } + + return entry?.value; + } + + private async fetchFromS3(clientId: string): Promise { + try { + const response = await this.s3Client.send( + new GetObjectCommand({ + Bucket: this.bucketName, + Key: `${this.keyPrefix}${clientId}.json`, + }), + ); + + if (!response.Body) { + throw new Error(`S3 response body was empty for client '${clientId}'`); + } + + return await response.Body.transformToString(); + } catch (error) { + if (error instanceof NoSuchKey) { + logger.info( + "No config found in S3 for client — events will be filtered out", + { clientId }, + ); + return undefined; + } + throw error; + } + } +} diff --git a/src/config-subscription-cache/src/index.ts b/src/config-subscription-cache/src/index.ts new file mode 100644 index 00000000..39a4501b --- /dev/null +++ 
b/src/config-subscription-cache/src/index.ts @@ -0,0 +1,2 @@ +export { ConfigSubscriptionCache } from "./config-subscription-cache"; +export type { ConfigSubscriptionCacheOptions } from "./config-subscription-cache"; diff --git a/src/config-subscription-cache/tsconfig.json b/src/config-subscription-cache/tsconfig.json new file mode 100644 index 00000000..a50e6fc0 --- /dev/null +++ b/src/config-subscription-cache/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "isolatedModules": true, + "paths": { + "*": [ + "./src/*" + ] + } + }, + "extends": "../../tsconfig.base.json", + "include": [ + "src/**/*" + ] +} diff --git a/src/models/src/__tests__/client-config-schema.test.ts b/src/models/src/__tests__/client-config-schema.test.ts index da1e5429..fa90a061 100644 --- a/src/models/src/__tests__/client-config-schema.test.ts +++ b/src/models/src/__tests__/client-config-schema.test.ts @@ -19,6 +19,8 @@ const expectFailedParse = ( return result; }; +const VALID_SPKI_HASH = "KL/yFsVH+gnkkzdQ+DSlV8xMQOMehksgT6aOqQviOu8="; + const createValidConfig = (): ClientSubscriptionConfiguration => ({ clientId: "client-1", subscriptions: [ @@ -45,6 +47,12 @@ const createValidConfig = (): ClientSubscriptionConfiguration => ({ invocationMethod: "POST", invocationRateLimit: 10, apiKey: { headerName: "x-api-key", headerValue: "secret" }, + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: VALID_SPKI_HASH }, + }, + }, }, ], }); @@ -147,4 +155,128 @@ describe("parseClientSubscriptionConfiguration", () => { }), ]); }); + + it("parses a valid config with mtls, certPinning, and delivery fields", () => { + const config = createValidConfig(); + config.targets[0].delivery = { + ...config.targets[0].delivery, + maxRetryDurationSeconds: 7200, + circuitBreaker: { enabled: true }, + }; + + expect(parseClientSubscriptionConfiguration(config)).toEqual({ + success: true, + data: config, + }); + }); + + it("returns a failed parse result when delivery.mtls has invalid 
shape", () => { + const config = createValidConfig(); + (config.targets[0] as Record).delivery = { + mtls: { enabled: "not-a-boolean" }, + }; + + const result = expectFailedParse( + parseClientSubscriptionConfiguration(config), + ); + + expect(result.error.issues).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + path: expect.arrayContaining(["targets", 0, "delivery"]), + }), + ]), + ); + }); + + it("returns a failed parse result when spkiHash has an invalid pattern", () => { + const config = createValidConfig(); + config.targets[0].delivery!.mtls!.certPinning = { + enabled: true, + spkiHash: "not-a-valid-hash", + }; + + const result = expectFailedParse( + parseClientSubscriptionConfiguration(config), + ); + + expect(result.error.issues).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + message: "Invalid SPKI hash", + }), + ]), + ); + }); + + it("returns a failed parse result when certPinning.enabled is true without spkiHash", () => { + const config = createValidConfig(); + config.targets[0].delivery!.mtls!.certPinning = { enabled: true }; + + const result = expectFailedParse( + parseClientSubscriptionConfiguration(config), + ); + + expect(result.error.issues).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + message: "spkiHash is required when certPinning is enabled", + }), + ]), + ); + }); + + it("returns a failed parse result when maxRetryDurationSeconds is zero", () => { + const config = createValidConfig(); + config.targets[0].delivery = { maxRetryDurationSeconds: 0 }; + + const result = expectFailedParse( + parseClientSubscriptionConfiguration(config), + ); + + expect(result.success).toBe(false); + }); + + it("returns a failed parse result when maxRetryDurationSeconds is negative", () => { + const config = createValidConfig(); + config.targets[0].delivery = { maxRetryDurationSeconds: -1 }; + + const result = expectFailedParse( + parseClientSubscriptionConfiguration(config), + ); + + 
expect(result.success).toBe(false); + }); + + it("returns a failed parse result when maxRetryDurationSeconds is above 43200", () => { + const config = createValidConfig(); + config.targets[0].delivery = { maxRetryDurationSeconds: 43_201 }; + + const result = expectFailedParse( + parseClientSubscriptionConfiguration(config), + ); + + expect(result.success).toBe(false); + }); + + it("accepts maxRetryDurationSeconds below 60", () => { + const config = createValidConfig(); + config.targets[0].delivery = { maxRetryDurationSeconds: 10 }; + + expect(parseClientSubscriptionConfiguration(config).success).toBe(true); + }); + + it("accepts maxRetryDurationSeconds at boundary value 1", () => { + const config = createValidConfig(); + config.targets[0].delivery = { maxRetryDurationSeconds: 1 }; + + expect(parseClientSubscriptionConfiguration(config).success).toBe(true); + }); + + it("accepts maxRetryDurationSeconds at boundary value 43200", () => { + const config = createValidConfig(); + config.targets[0].delivery = { maxRetryDurationSeconds: 43_200 }; + + expect(parseClientSubscriptionConfiguration(config).success).toBe(true); + }); }); diff --git a/src/models/src/client-config-schema.ts b/src/models/src/client-config-schema.ts index b56a9439..cae4587a 100644 --- a/src/models/src/client-config-schema.ts +++ b/src/models/src/client-config-schema.ts @@ -22,6 +22,20 @@ const httpsUrlSchema = z.string().refine( }, ); +const SPKI_HASH_PATTERN = /^[A-Za-z0-9+/]{43}=$/; + +const certPinningSchema = z + .object({ + enabled: z.boolean(), + spkiHash: z + .string() + .regex(SPKI_HASH_PATTERN, "Invalid SPKI hash") + .optional(), + }) + .refine((val) => !val.enabled || val.spkiHash !== undefined, { + message: "spkiHash is required when certPinning is enabled", + }); + const targetSchema = z.object({ targetId: z.string(), type: z.literal("API"), @@ -32,6 +46,22 @@ const targetSchema = z.object({ headerName: z.string(), headerValue: z.string(), }), + delivery: z + .object({ + 
maxRetryDurationSeconds: z.number().positive().max(43_200).optional(), + circuitBreaker: z + .object({ + enabled: z.boolean(), + }) + .optional(), + mtls: z + .object({ + enabled: z.boolean(), + certPinning: certPinningSchema.optional(), + }) + .optional(), + }) + .optional(), }); const baseSubscriptionSchema = z.object({ diff --git a/src/models/src/client-config.ts b/src/models/src/client-config.ts index 84116353..4d1796d1 100644 --- a/src/models/src/client-config.ts +++ b/src/models/src/client-config.ts @@ -15,6 +15,19 @@ export type CallbackTarget = { headerName: string; headerValue: string; }; + delivery?: { + maxRetryDurationSeconds?: number; + circuitBreaker?: { + enabled: boolean; + }; + mtls?: { + enabled: boolean; + certPinning?: { + enabled: boolean; + spkiHash?: string; + }; + }; + }; }; type SubscriptionConfigurationBase = { diff --git a/tests/integration/README.md b/tests/integration/README.md index a58531b8..0a76bf74 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -50,30 +50,33 @@ All are run via `make test-integration-debug ACTION=`. - [`queue-status`](#queue-status) – SQS queue message counts - [`queue-peek`](#queue-peek) – Peek at one message from each SQS queue - [`tail-transform`](#tail-transform) – Tail the transform/filter Lambda logs +- [`tail-https-client`](#tail-https-client) – Tail the https-client Lambda logs - [`tail-webhook`](#tail-webhook) – Tail the mock-webhook Lambda logs - [`tail-pipe`](#tail-pipe) – Tail the EventBridge pipe logs - [`pipe-state`](#pipe-state) – Show EventBridge pipe state and recent metrics -All log-tailing actions (`tail-transform`, `tail-webhook`, `tail-pipe`) accept an optional `LOG_FILTER` to narrow output to a specific message ID or pattern. +Some actions require `CLIENT_ID` (e.g. `mock-client-single-target`) — see individual actions below. 
+ +All log-tailing actions (`tail-transform`, `tail-https-client`, `tail-webhook`, `tail-pipe`) accept an optional `LOG_FILTER` to narrow output to a specific message ID or pattern. --- ### `queue-status` -Shows approximate message counts for the inbound event queue, inbound event DLQ, and mock target DLQ. +Shows approximate message counts for the inbound event queue, inbound event DLQ, client delivery queue, and client delivery DLQ. Requires `CLIENT_ID`. ```sh -ENVIRONMENT= AWS_PROFILE= make test-integration-debug ACTION=queue-status +CLIENT_ID= ENVIRONMENT= AWS_PROFILE= make test-integration-debug ACTION=queue-status ``` --- ### `queue-peek` -Reads one message (without deleting it) from each of the same three queues, printing body, attributes, and message attributes. +Reads one message (without deleting it) from each of the same four queues, printing body, attributes, and message attributes. Requires `CLIENT_ID`. ```sh -ENVIRONMENT= AWS_PROFILE= make test-integration-debug ACTION=queue-peek +CLIENT_ID= ENVIRONMENT= AWS_PROFILE= make test-integration-debug ACTION=queue-peek ``` --- @@ -94,6 +97,22 @@ ENVIRONMENT= AWS_PROFILE= LOG_FILTER=SOME-MESSAGE-ID make test-int --- +### `tail-https-client` + +Tails CloudWatch logs for the `https-client` Lambda for the given client, following from the last 30 minutes. Requires `CLIENT_ID`. + +```sh +CLIENT_ID= ENVIRONMENT= AWS_PROFILE= make test-integration-debug ACTION=tail-https-client +``` + +Filter to a specific message ID: + +```sh +CLIENT_ID= ENVIRONMENT= AWS_PROFILE= LOG_FILTER=SOME-MESSAGE-ID make test-integration-debug ACTION=tail-https-client +``` + +--- + ### `tail-webhook` Tails CloudWatch logs for the `mock-webhook` Lambda, following from the last 30 minutes. 
diff --git a/tests/integration/delivery-resilience.test.ts b/tests/integration/delivery-resilience.test.ts new file mode 100644 index 00000000..8b218233 --- /dev/null +++ b/tests/integration/delivery-resilience.test.ts @@ -0,0 +1,254 @@ +import type { + MessageStatusData, + StatusPublishEvent, +} from "@nhs-notify-client-callbacks/models"; +import { + awaitCallback, + awaitCallbacks, + countLogEntries, +} from "./helpers/cloudwatch"; +import { createMessageStatusPublishEvent } from "./helpers/event-factories"; +import { + buildMockWebhookTargetPath, + getClientConfig, +} from "./helpers/mock-client-config"; +import { assertCallbackHeaders } from "./helpers/signature"; +import { + awaitQueueMessage, + deleteMessage, + getQueueDepth, + purgeQueues, + sendSqsEvent, +} from "./helpers/sqs"; +import { + type TestContext, + createTestContext, + destroyTestContext, +} from "./helpers/test-context"; + +describe("Delivery Resilience", () => { + let ctx: TestContext; + + beforeAll(() => { + ctx = createTestContext(); + }); + + afterAll(() => { + destroyTestContext(ctx); + }); + + describe("Retry & Window Exhaustion", () => { + let dlqUrl: string; + let deliveryUrl: string; + + beforeAll(async () => { + const { clientId } = getClientConfig("clientShortRetry"); + dlqUrl = ctx.clientDlqUrl(clientId); + deliveryUrl = ctx.clientDeliveryUrl(clientId); + await purgeQueues(ctx.sqs, [dlqUrl, deliveryUrl]); + }); + + afterAll(async () => { + await purgeQueues(ctx.sqs, [dlqUrl, deliveryUrl]); + }); + + it("should exhaust the retry window on persistent 5xx and route to DLQ", async () => { + const { clientId } = getClientConfig("clientShortRetry"); + const messageId = `force-500-${crypto.randomUUID()}`; + + const event: StatusPublishEvent = + createMessageStatusPublishEvent({ data: { clientId, messageId } }); + + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + + const dlqMessage = await awaitQueueMessage(ctx.sqs, dlqUrl, 90_000); + + expect(dlqMessage.Body).toBeDefined(); + 
const dlqPayload = JSON.parse(dlqMessage.Body as string); + expect(dlqPayload.payload.data[0].attributes.messageId).toBe(messageId); + + const attemptCount = await countLogEntries( + ctx.cwLogs, + ctx.webhookLogGroup, + `{ $.msg = "Forced status code response" && $.messageId = "${messageId}" }`, + ctx.startTime, + 2, + ); + expect(attemptCount).toBeGreaterThan(1); + + await deleteMessage(ctx.sqs, dlqUrl, dlqMessage); + }, 180_000); + + it("should exhaust the retry window on persistent 429 and route to DLQ", async () => { + const { clientId } = getClientConfig("clientShortRetry"); + const messageId = `force-429-${crypto.randomUUID()}`; + + const event: StatusPublishEvent = + createMessageStatusPublishEvent({ data: { clientId, messageId } }); + + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + + const dlqMessage = await awaitQueueMessage(ctx.sqs, dlqUrl, 90_000); + + expect(dlqMessage.Body).toBeDefined(); + const dlqPayload = JSON.parse(dlqMessage.Body as string); + expect(dlqPayload.payload.data[0].attributes.messageId).toBe(messageId); + + const attemptCount = await countLogEntries( + ctx.cwLogs, + ctx.webhookLogGroup, + `{ $.msg = "Forced status code response" && $.messageId = "${messageId}" }`, + ctx.startTime, + 2, + ); + expect(attemptCount).toBeGreaterThan(1); + + await deleteMessage(ctx.sqs, dlqUrl, dlqMessage); + }, 180_000); + }); + + describe("Rate Limiting", () => { + const BURST_SIZE = 15; + let dlqUrl: string; + let deliveryUrl: string; + let httpsClientLogGroup: string; + + beforeAll(async () => { + const { clientId } = getClientConfig("clientRateLimit"); + dlqUrl = ctx.clientDlqUrl(clientId); + deliveryUrl = ctx.clientDeliveryUrl(clientId); + httpsClientLogGroup = ctx.logGroup(`https-client-${clientId}`); + await purgeQueues(ctx.sqs, [dlqUrl, deliveryUrl]); + }); + + afterAll(async () => { + await purgeQueues(ctx.sqs, [dlqUrl, deliveryUrl]); + }); + + it("should eventually deliver all events in a burst without dropping any to the DLQ", 
async () => { + const rateLimitConfig = getClientConfig("clientRateLimit"); + const targetPath = buildMockWebhookTargetPath("clientRateLimit"); + + const events = Array.from({ length: BURST_SIZE }, () => + createMessageStatusPublishEvent({ + data: { + clientId: rateLimitConfig.clientId, + messageId: `rate-limit-burst-${crypto.randomUUID()}`, + }, + }), + ); + + await Promise.all( + events.map((event) => + sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event), + ), + ); + + const callbackMap = await awaitCallbacks( + ctx.cwLogs, + ctx.webhookLogGroup, + events.map((e) => e.data.messageId), + "MessageStatus", + 1, + ctx.startTime, + ); + + const deliveredIds = [...callbackMap.keys()]; + const expectedIds = events.map((e) => e.data.messageId); + expect(deliveredIds).toHaveLength(expectedIds.length); + expect(deliveredIds).toEqual(expect.arrayContaining(expectedIds)); + + for (const [, [callback]] of callbackMap) { + expect(callback.path).toBe(targetPath); + assertCallbackHeaders( + callback, + rateLimitConfig.apiKeyVar, + rateLimitConfig.applicationIdVar, + ); + } + + expect(await getQueueDepth(ctx.sqs, dlqUrl)).toBe(0); + + const rateLimitedCount = await countLogEntries( + ctx.cwLogs, + httpsClientLogGroup, + `{ $.msg = "Admission denied" && $.reason = "rate_limited" }`, + ctx.startTime, + 1, + ); + expect(rateLimitedCount).toBeGreaterThanOrEqual(1); + }, 180_000); + }); + + describe("Circuit Breaker", () => { + const CB_BURST_SIZE = 15; + let dlqUrl: string; + let deliveryUrl: string; + let httpsClientLogGroup: string; + + beforeAll(async () => { + const { clientId } = getClientConfig("clientCircuitBreaker"); + dlqUrl = ctx.clientDlqUrl(clientId); + deliveryUrl = ctx.clientDeliveryUrl(clientId); + httpsClientLogGroup = ctx.logGroup(`https-client-${clientId}`); + await purgeQueues(ctx.sqs, [dlqUrl, deliveryUrl]); + }); + + afterAll(async () => { + await purgeQueues(ctx.sqs, [dlqUrl, deliveryUrl]); + }); + + it("should open the circuit breaker after repeated failures and 
not affect other clients", async () => { + const cbConfig = getClientConfig("clientCircuitBreaker"); + const singleTargetConfig = getClientConfig("clientSingleTarget"); + const singleTargetPath = buildMockWebhookTargetPath("clientSingleTarget"); + + const cbEvents = Array.from({ length: CB_BURST_SIZE }, () => + createMessageStatusPublishEvent({ + data: { + clientId: cbConfig.clientId, + messageId: `force-500-cb-${crypto.randomUUID()}`, + }, + }), + ); + + const normalEvent = createMessageStatusPublishEvent({ + data: { + clientId: singleTargetConfig.clientId, + messageId: `cb-isolation-${crypto.randomUUID()}`, + }, + }); + + await Promise.all([ + ...cbEvents.map((event) => + sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event), + ), + sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, normalEvent), + ]); + + const normalCallback = await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, + normalEvent.data.messageId, + "MessageStatus", + ctx.startTime, + ); + + expect(normalCallback.path).toBe(singleTargetPath); + assertCallbackHeaders( + normalCallback, + singleTargetConfig.apiKeyVar, + singleTargetConfig.applicationIdVar, + ); + + const circuitOpenCount = await countLogEntries( + ctx.cwLogs, + httpsClientLogGroup, + `{ $.msg = "Admission denied" && $.reason = "circuit_open" }`, + ctx.startTime, + 1, + ); + expect(circuitOpenCount).toBeGreaterThanOrEqual(1); + }, 180_000); + }); +}); diff --git a/tests/integration/dlq-alarms.test.ts b/tests/integration/dlq-alarms.test.ts index 1cf3a578..c4f69fa8 100644 --- a/tests/integration/dlq-alarms.test.ts +++ b/tests/integration/dlq-alarms.test.ts @@ -5,14 +5,18 @@ import { } from "@aws-sdk/client-cloudwatch"; import type { DeploymentDetails } from "@nhs-notify-client-callbacks/test-support/helpers"; import { getDeploymentDetails } from "@nhs-notify-client-callbacks/test-support/helpers"; -import { getAllSubscriptionTargetIds } from "./helpers/mock-client-config"; +import { + CLIENT_FIXTURES, + type ClientFixtureKey, + getClientConfig, 
+} from "./helpers/mock-client-config"; import { buildMockClientDlqQueueUrl } from "./helpers/sqs"; function buildDlqDepthAlarmName( { component, environment, project }: DeploymentDetails, - targetId: string, + clientId: string, ): string { - return `${project}-${environment}-${component}-${targetId}-dlq-depth`; + return `${project}-${environment}-${component}-${clientId}-dlq-depth`; } function getQueueNameFromUrl(queueUrl: string): string { @@ -27,7 +31,7 @@ function getQueueNameFromUrl(queueUrl: string): string { describe("DLQ alarms", () => { let cloudWatchClient: CloudWatchClient; let deploymentDetails: DeploymentDetails; - let targetIds: string[]; + let clientIds: string[]; beforeAll(() => { deploymentDetails = getDeploymentDetails(); @@ -35,22 +39,25 @@ describe("DLQ alarms", () => { region: deploymentDetails.region, }); - targetIds = getAllSubscriptionTargetIds(); + clientIds = (Object.keys(CLIENT_FIXTURES) as ClientFixtureKey[]).map( + (key) => getClientConfig(key).clientId, + ); }); afterAll(() => { cloudWatchClient.destroy(); }); - it("should create a DLQ depth alarm for every target DLQ", async () => { - expect(targetIds.length).toBeGreaterThan(0); + it("should create a DLQ depth alarm for every client DLQ", async () => { + expect(clientIds.length).toBeGreaterThan(0); - for (const targetId of targetIds) { - const alarmName = buildDlqDepthAlarmName(deploymentDetails, targetId); - const targetDlqQueueUrl = buildMockClientDlqQueueUrl(deploymentDetails, [ - { targetId }, - ]); - const targetDlqQueueName = getQueueNameFromUrl(targetDlqQueueUrl); + for (const clientId of clientIds) { + const alarmName = buildDlqDepthAlarmName(deploymentDetails, clientId); + const clientDlqQueueUrl = buildMockClientDlqQueueUrl( + deploymentDetails, + clientId, + ); + const clientDlqQueueName = getQueueNameFromUrl(clientDlqQueueUrl); const response = await cloudWatchClient.send( new DescribeAlarmsCommand({ AlarmNames: [alarmName], @@ -67,7 +74,7 @@ describe("DLQ alarms", () => { 
expect.arrayContaining([ expect.objectContaining({ Name: "QueueName", - Value: targetDlqQueueName, + Value: clientDlqQueueName, }), ]), ); diff --git a/tests/integration/dlq-redrive.test.ts b/tests/integration/dlq-redrive.test.ts index e88e4920..325c82ed 100644 --- a/tests/integration/dlq-redrive.test.ts +++ b/tests/integration/dlq-redrive.test.ts @@ -1,98 +1,76 @@ -import { GetQueueAttributesCommand, SQSClient } from "@aws-sdk/client-sqs"; -import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; import type { MessageStatusData, StatusPublishEvent, } from "@nhs-notify-client-callbacks/models"; +import { awaitCallback, awaitCallbacks } from "./helpers/cloudwatch"; +import { createMessageStatusPublishEvent } from "./helpers/event-factories"; import { - buildInboundEventQueueUrl, - buildLambdaLogGroupName, - createCloudWatchLogsClient, - createSqsClient, - getDeploymentDetails, -} from "@nhs-notify-client-callbacks/test-support/helpers"; + CLIENT_FIXTURES, + type ClientFixtureKey, + getClientConfig, +} from "./helpers/mock-client-config"; +import sendEventToDlqAndRedrive from "./helpers/redrive"; import { assertCallbackHeaders } from "./helpers/signature"; import { - buildMockClientDlqQueueUrl, + awaitQueueMessage, + deleteMessage, ensureInboundQueueIsEmpty, + getQueueDepth, purgeQueues, sendSqsEvent, } from "./helpers/sqs"; import { - buildMockWebhookTargetPath, - getAllSubscriptionTargetIds, - getMockItClientConfig, -} from "./helpers/mock-client-config"; -import { awaitSignedCallbacksFromWebhookLogGroup } from "./helpers/cloudwatch"; -import { createMessageStatusPublishEvent } from "./helpers/event-factories"; -import sendEventToDlqAndRedrive from "./helpers/redrive"; + type TestContext, + createTestContext, + destroyTestContext, +} from "./helpers/test-context"; describe("DLQ Redrive", () => { - let sqsClient: SQSClient; - let cloudWatchClient: CloudWatchLogsClient; - let dlqQueueUrl!: string; - let allTargetDlqQueueUrls: string[]; - let 
inboundQueueUrl: string; - let webhookLogGroupName: string; + let ctx: TestContext; + let dlqUrl: string; + let deliveryUrl: string; + let allDlqUrls: string[]; beforeAll(async () => { - const deploymentDetails = getDeploymentDetails(); - const mockClient1 = getMockItClientConfig(); - - const allSubscriptionTargetIds = getAllSubscriptionTargetIds(); - - sqsClient = createSqsClient(deploymentDetails); - cloudWatchClient = createCloudWatchLogsClient(deploymentDetails); + ctx = createTestContext(); + const { clientId } = getClientConfig("clientSingleTarget"); - inboundQueueUrl = buildInboundEventQueueUrl(deploymentDetails); - dlqQueueUrl = buildMockClientDlqQueueUrl( - deploymentDetails, - mockClient1.targets, - ); - allTargetDlqQueueUrls = allSubscriptionTargetIds.map((targetId) => - buildMockClientDlqQueueUrl(deploymentDetails, [{ targetId }]), - ); - webhookLogGroupName = buildLambdaLogGroupName( - deploymentDetails, - "mock-webhook", + dlqUrl = ctx.clientDlqUrl(clientId); + deliveryUrl = ctx.clientDeliveryUrl(clientId); + allDlqUrls = (Object.keys(CLIENT_FIXTURES) as ClientFixtureKey[]).map( + (key) => ctx.clientDlqUrl(getClientConfig(key).clientId), ); - await purgeQueues(sqsClient, [inboundQueueUrl, ...allTargetDlqQueueUrls]); + await purgeQueues(ctx.sqs, [ + ctx.inboundQueueUrl, + deliveryUrl, + ...allDlqUrls, + ]); }); afterAll(async () => { - await purgeQueues(sqsClient, [inboundQueueUrl, ...allTargetDlqQueueUrls]); - sqsClient.destroy(); - cloudWatchClient.destroy(); + await purgeQueues(ctx.sqs, [ + ctx.inboundQueueUrl, + deliveryUrl, + ...allDlqUrls, + ]); + destroyTestContext(ctx); }); describe("Infrastructure validation", () => { - it("should confirm a target DLQ is accessible for all configured subscription targets", async () => { - const responses = await Promise.all( - allTargetDlqQueueUrls.map((queueUrl) => - sqsClient.send( - new GetQueueAttributesCommand({ - QueueUrl: queueUrl, - AttributeNames: ["QueueArn", "ApproximateNumberOfMessages"], - }), - 
), - ), + it("should confirm a DLQ is accessible for all configured clients", async () => { + const depths = await Promise.all( + allDlqUrls.map((url) => getQueueDepth(ctx.sqs, url)), ); - for (const response of responses) { - expect(response.Attributes?.QueueArn).toBeDefined(); + for (const depth of depths) { + expect(depth).toBeGreaterThanOrEqual(0); } }); it("should confirm the inbound event queue exists and is accessible", async () => { - const response = await sqsClient.send( - new GetQueueAttributesCommand({ - QueueUrl: inboundQueueUrl, - AttributeNames: ["QueueArn", "ApproximateNumberOfMessages"], - }), - ); - - expect(response.Attributes?.QueueArn).toBeDefined(); + const depth = await getQueueDepth(ctx.sqs, ctx.inboundQueueUrl); + expect(depth).toBeGreaterThanOrEqual(0); }); }); @@ -101,33 +79,32 @@ describe("DLQ Redrive", () => { const startTime = Date.now(); const event: StatusPublishEvent = createMessageStatusPublishEvent(); + const { payload: redrivePayload } = await sendEventToDlqAndRedrive( - sqsClient, - dlqQueueUrl, - inboundQueueUrl, + ctx.sqs, + dlqUrl, + ctx.inboundQueueUrl, event, ); expect(redrivePayload.id).toBe(event.id); - await ensureInboundQueueIsEmpty(sqsClient, inboundQueueUrl); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); - const callbacks = await awaitSignedCallbacksFromWebhookLogGroup( - cloudWatchClient, - webhookLogGroupName, + const callback = await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, event.data.messageId, "MessageStatus", startTime, - buildMockWebhookTargetPath(), ); - expect(callbacks.length).toBeGreaterThan(0); - expect(callbacks[0].payload).toMatchObject({ + expect(callback.payload).toMatchObject({ type: "MessageStatus", attributes: expect.objectContaining({ messageStatus: "delivered", }), }); - assertCallbackHeaders(callbacks[0]); + assertCallbackHeaders(callback); }, 120_000); it("should apply the same transformation logic to redriven events as original deliveries", async () => { @@ -153,47 
+130,90 @@ describe("DLQ Redrive", () => { }, }); - await sendSqsEvent(sqsClient, inboundQueueUrl, directEvent); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, directEvent); const { payload: dlqPayload } = await sendEventToDlqAndRedrive( - sqsClient, - dlqQueueUrl, - inboundQueueUrl, + ctx.sqs, + dlqUrl, + ctx.inboundQueueUrl, redriveEvent, ); expect(dlqPayload.data.messageId).toBe(redriveEvent.data.messageId); - const [directCallbacks, redriveCallbacks] = await Promise.all([ - awaitSignedCallbacksFromWebhookLogGroup( - cloudWatchClient, - webhookLogGroupName, - directEvent.data.messageId, - "MessageStatus", - startTime, - buildMockWebhookTargetPath(), - ), - awaitSignedCallbacksFromWebhookLogGroup( - cloudWatchClient, - webhookLogGroupName, - redriveEvent.data.messageId, - "MessageStatus", - startTime, - buildMockWebhookTargetPath(), - ), - ]); - - await ensureInboundQueueIsEmpty(sqsClient, inboundQueueUrl); - - expect(redriveCallbacks[0].payload).toMatchObject({ - type: directCallbacks[0].payload.type, + const callbackMap = await awaitCallbacks( + ctx.cwLogs, + ctx.webhookLogGroup, + [directEvent.data.messageId, redriveEvent.data.messageId], + "MessageStatus", + 1, + startTime, + ); + + const directCallback = callbackMap.get(directEvent.data.messageId)![0]; + const redriveCallback = callbackMap.get(redriveEvent.data.messageId)![0]; + + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); + + expect(redriveCallback.payload).toMatchObject({ + type: directCallback.payload.type, attributes: expect.objectContaining({ messageStatus: ( - directCallbacks[0].payload.attributes as { messageStatus?: string } + directCallback.payload.attributes as { messageStatus?: string } ).messageStatus, }), }); - assertCallbackHeaders(redriveCallbacks[0]); + assertCallbackHeaders(redriveCallback); }, 120_000); }); + + describe("Delivery DLQ redrive", () => { + it("should redrive a 4xx-failed message from the delivery DLQ back through the delivery queue", async () => { + 
const redriveStartTime = Date.now(); + const forceMessageId = `force-400-redrive-${crypto.randomUUID()}`; + + const failingEvent: StatusPublishEvent = + createMessageStatusPublishEvent({ + data: { messageId: forceMessageId }, + }); + + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, failingEvent); + + const dlqMessage = await awaitQueueMessage(ctx.sqs, dlqUrl, 90_000); + + expect(dlqMessage.Body).toBeDefined(); + expect(dlqMessage.MessageAttributes?.ERROR_CODE?.StringValue).toBe( + "HTTP_CLIENT_ERROR", + ); + + const dlqBody = JSON.parse(dlqMessage.Body as string) as { + payload: { data: { attributes: { messageId: string } }[] }; + subscriptionId: string; + targetId: string; + }; + + const redriveMessageId = `redriven-dlq-${crypto.randomUUID()}`; + dlqBody.payload.data[0].attributes.messageId = redriveMessageId; + + await sendSqsEvent(ctx.sqs, deliveryUrl, dlqBody); + await deleteMessage(ctx.sqs, dlqUrl, dlqMessage); + + const callback = await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, + redriveMessageId, + "MessageStatus", + redriveStartTime, + ); + + expect(callback.payload).toMatchObject({ + type: "MessageStatus", + attributes: expect.objectContaining({ + messageId: redriveMessageId, + messageStatus: "delivered", + }), + }); + assertCallbackHeaders(callback); + }, 180_000); + }); }); diff --git a/tests/integration/fixtures/subscriptions/mock-client-circuit-breaker.json b/tests/integration/fixtures/subscriptions/mock-client-circuit-breaker.json new file mode 100644 index 00000000..58243d3d --- /dev/null +++ b/tests/integration/fixtures/subscriptions/mock-client-circuit-breaker.json @@ -0,0 +1,40 @@ +{ + "clientId": "mock-client-circuit-breaker", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED", + "FAILED" + ], + "subscriptionId": "sub-cb-msg-001", + "subscriptionType": "MessageStatus", + "targetIds": [ + "target-cb-001" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + 
"delivery": { + "circuitBreaker": { + "enabled": true + }, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 10, + "targetId": "target-cb-001", + "type": "API" + } + ] +} diff --git a/tests/integration/fixtures/subscriptions/mock-client-2.json b/tests/integration/fixtures/subscriptions/mock-client-fan-out.json similarity index 75% rename from tests/integration/fixtures/subscriptions/mock-client-2.json rename to tests/integration/fixtures/subscriptions/mock-client-fan-out.json index ee7091cd..14985d4c 100644 --- a/tests/integration/fixtures/subscriptions/mock-client-2.json +++ b/tests/integration/fixtures/subscriptions/mock-client-fan-out.json @@ -1,5 +1,5 @@ { - "clientId": "mock-client-2", + "clientId": "mock-client-fan-out", "subscriptions": [ { "messageStatuses": [ @@ -20,6 +20,14 @@ "headerName": "x-api-key", "headerValue": "REPLACED_BY_TERRAFORM" }, + "delivery": { + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", "invocationMethod": "POST", "invocationRateLimit": 10, @@ -31,6 +39,14 @@ "headerName": "x-api-key", "headerValue": "REPLACED_BY_TERRAFORM" }, + "delivery": { + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", "invocationMethod": "POST", "invocationRateLimit": 10, diff --git a/tests/integration/fixtures/subscriptions/mock-client-mtls.json b/tests/integration/fixtures/subscriptions/mock-client-mtls.json new file mode 100644 index 00000000..0fce5d72 --- /dev/null +++ b/tests/integration/fixtures/subscriptions/mock-client-mtls.json @@ -0,0 +1,38 @@ +{ + "clientId": "mock-client-mtls", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED", + "FAILED" + ], + "subscriptionId": "sub-mtls-msg-001", + "subscriptionType": "MessageStatus", 
+ "targetIds": [ + "target-mtls-001" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "mtls": { + "certPinning": { + "enabled": true, + "spkiHash": "REPLACED_BY_TERRAFORM" + }, + "enabled": true + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 10, + "targetId": "target-mtls-001", + "type": "API" + } + ] +} diff --git a/tests/integration/fixtures/subscriptions/mock-client-rate-limit.json b/tests/integration/fixtures/subscriptions/mock-client-rate-limit.json new file mode 100644 index 00000000..21e53636 --- /dev/null +++ b/tests/integration/fixtures/subscriptions/mock-client-rate-limit.json @@ -0,0 +1,37 @@ +{ + "clientId": "mock-client-rate-limit", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED", + "FAILED" + ], + "subscriptionId": "sub-rl-msg-001", + "subscriptionType": "MessageStatus", + "targetIds": [ + "target-rl-001" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 2, + "targetId": "target-rl-001", + "type": "API" + } + ] +} diff --git a/tests/integration/fixtures/subscriptions/mock-client-short-retry.json b/tests/integration/fixtures/subscriptions/mock-client-short-retry.json new file mode 100644 index 00000000..d6528102 --- /dev/null +++ b/tests/integration/fixtures/subscriptions/mock-client-short-retry.json @@ -0,0 +1,38 @@ +{ + "clientId": "mock-client-short-retry", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED", + "FAILED" + ], + "subscriptionId": "sub-sr-msg-001", + "subscriptionType": "MessageStatus", + "targetIds": [ + "target-sr-001" + ] + } + ], + "targets": [ + { + "apiKey": { + 
"headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "maxRetryDurationSeconds": 10, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 10, + "targetId": "target-sr-001", + "type": "API" + } + ] +} diff --git a/tests/integration/fixtures/subscriptions/mock-client-1.json b/tests/integration/fixtures/subscriptions/mock-client-single-target.json similarity index 84% rename from tests/integration/fixtures/subscriptions/mock-client-1.json rename to tests/integration/fixtures/subscriptions/mock-client-single-target.json index 1e76ad65..41422f4b 100644 --- a/tests/integration/fixtures/subscriptions/mock-client-1.json +++ b/tests/integration/fixtures/subscriptions/mock-client-single-target.json @@ -1,5 +1,5 @@ { - "clientId": "mock-client-1", + "clientId": "mock-client-single-target", "subscriptions": [ { "messageStatuses": [ @@ -35,6 +35,14 @@ "headerName": "x-api-key", "headerValue": "REPLACED_BY_TERRAFORM" }, + "delivery": { + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", "invocationMethod": "POST", "invocationRateLimit": 10, diff --git a/tests/integration/helpers/cloudwatch.ts b/tests/integration/helpers/cloudwatch.ts index 9ee13739..d66b18ce 100644 --- a/tests/integration/helpers/cloudwatch.ts +++ b/tests/integration/helpers/cloudwatch.ts @@ -6,266 +6,255 @@ import { logger } from "@nhs-notify-client-callbacks/logger"; import type { CallbackItem } from "@nhs-notify-client-callbacks/models"; import { TimeoutError, waitUntil } from "async-wait-until"; -const CALLBACK_WAIT_TIMEOUT_MS = 60_000; -const METRICS_WAIT_TIMEOUT_MS = 60_000; +const WAIT_TIMEOUT_MS = 60_000; const POLL_INTERVAL_MS = 2000; -const CLOUDWATCH_QUERY_LOOKBACK_MS = Number( - process.env.CLOUDWATCH_QUERY_LOOKBACK_MS ?? 
5000, -); +const LOOKBACK_MS = Number(process.env.CLOUDWATCH_QUERY_LOOKBACK_MS ?? 5000); type LogEntry = { msg: string; - correlationId?: string; + messageId?: string; callbackType?: string; - clientId?: string; apiKey?: string; signature?: string; payload?: string; path?: string; + isMtls?: boolean; }; export type SignedCallback = { payload: CallbackItem; path: string; + isMtls: boolean; headers: { "x-api-key": string; "x-hmac-sha256-signature": string; }; }; -async function querySignedCallbacksFromWebhookLogGroup( - client: CloudWatchLogsClient, - logGroupName: string, - messageId: string, - callbackType: CallbackItem["type"], - startTime: number, -): Promise { - const filterPattern = `{ $.msg = "Callback received" && $.messageId = "${messageId}" && $.callbackType = "${callbackType}" }`; - const queryStartTime = Math.max(0, startTime - CLOUDWATCH_QUERY_LOOKBACK_MS); - - const response = await client.send( - new FilterLogEventsCommand({ - logGroupName, - startTime: queryStartTime, - filterPattern, - }), - ); - - const events = response.events ?? []; - const callbacks: SignedCallback[] = []; - - for (const event of events) { - if (event.message) { - try { - const entry = JSON.parse(event.message) as LogEntry; - if (entry.signature !== undefined && entry.payload) { - callbacks.push({ - payload: JSON.parse(entry.payload) as CallbackItem, - path: entry.path ?? "", - headers: { - "x-api-key": entry.apiKey ?? 
"", - "x-hmac-sha256-signature": entry.signature, - }, - }); - } - } catch { - // skip unparseable entries - } - } - } - - return callbacks; -} - -async function pollUntilFound( - poll: () => Promise, - timeoutMs: number, - timeoutMessage: string, -): Promise { - let results: T[] = []; - +// eslint-disable-next-line sonarjs/function-return-type -- returns SignedCallback | undefined consistently +function parseCallback( + message: string, + messageIdSet: Set, +): SignedCallback | undefined { try { - await waitUntil( - async () => { - results = await poll(); - return results.length > 0; + const entry = JSON.parse(message) as LogEntry; + if ( + !entry.messageId || + !messageIdSet.has(entry.messageId) || + entry.signature === undefined || + !entry.payload + ) + return undefined; + + return { + payload: JSON.parse(entry.payload) as CallbackItem, + path: entry.path ?? "", + isMtls: entry.isMtls ?? false, + headers: { + "x-api-key": entry.apiKey ?? "", + "x-hmac-sha256-signature": entry.signature, }, - { timeout: timeoutMs, intervalBetweenAttempts: POLL_INTERVAL_MS }, - ); - } catch (error) { - if (error instanceof TimeoutError) { - logger.warn(timeoutMessage); - } else { - throw error; - } + }; + } catch { + return undefined; } - - return results; } -export async function awaitSignedCallbacksFromWebhookLogGroup( +async function pollCallbacks( client: CloudWatchLogsClient, logGroupName: string, - messageId: string, + messageIds: string[], callbackType: CallbackItem["type"], + expectedPerMessage: number, startTime: number, - path: string, ): Promise { - const queryStartTime = Math.max(0, startTime - CLOUDWATCH_QUERY_LOOKBACK_MS); - logger.debug( - `Waiting for callback in webhook CloudWatch log group (messageId=${messageId}, path=${path}, logGroup=${logGroupName}, startTimeIso=${new Date(startTime).toISOString()}, queryStartTimeIso=${new Date(queryStartTime).toISOString()}, lookbackMs=${CLOUDWATCH_QUERY_LOOKBACK_MS})`, - ); - - const callbacks = await pollUntilFound( - () 
=> - querySignedCallbacksFromWebhookLogGroup( - client, - logGroupName, - messageId, - callbackType, - startTime, - ), - CALLBACK_WAIT_TIMEOUT_MS, - `Timed out waiting for callback in webhook CloudWatch log group (messageId=${messageId}, callbackType=${callbackType}, path=${path}, timeoutMs=${CALLBACK_WAIT_TIMEOUT_MS})`, - ); - - if (callbacks.length !== 1) { - throw new Error( - `Expected exactly 1 callback for messageId="${messageId}" callbackType="${callbackType}", but found ${callbacks.length}`, - ); - } - - if (callbacks[0].path !== path) { - throw new Error( - `Expected callback path "${path}" for messageId="${messageId}", but got "${callbacks[0].path}"`, - ); - } - - return callbacks; -} + const messageIdSet = new Set(messageIds); + const expectedTotal = messageIds.length * expectedPerMessage; + const queryStartTime = Math.max(0, startTime - LOOKBACK_MS); + const filterPattern = `{ $.msg = "Callback received" && $.callbackType = "${callbackType}" }`; -export async function awaitSignedCallbacksByCountFromWebhookLogGroup( - client: CloudWatchLogsClient, - logGroupName: string, - messageId: string, - callbackType: CallbackItem["type"], - expectedCount: number, - startTime: number, -): Promise { logger.debug( - `Waiting for callbacks in webhook CloudWatch log group (messageId=${messageId}, callbackType=${callbackType}, expectedCount=${expectedCount}, logGroup=${logGroupName})`, + `Waiting for ${expectedTotal} callback(s) (type=${callbackType}, messages=${messageIds.length}, logGroup=${logGroupName})`, ); - let callbacks: SignedCallback[] = []; + let matched: SignedCallback[] = []; try { await waitUntil( async () => { - callbacks = await querySignedCallbacksFromWebhookLogGroup( - client, - logGroupName, - messageId, - callbackType, - startTime, + const response = await client.send( + new FilterLogEventsCommand({ + logGroupName, + startTime: queryStartTime, + filterPattern, + }), ); - return callbacks.length === expectedCount; - }, - { - timeout: 
CALLBACK_WAIT_TIMEOUT_MS, - intervalBetweenAttempts: POLL_INTERVAL_MS, + + matched = (response.events ?? []) + .filter((event): event is typeof event & { message: string } => + Boolean(event.message), + ) + .map((event) => parseCallback(event.message, messageIdSet)) + .filter((cb): cb is SignedCallback => cb !== undefined); + + return matched.length >= expectedTotal; }, + { timeout: WAIT_TIMEOUT_MS, intervalBetweenAttempts: POLL_INTERVAL_MS }, ); } catch (error) { if (error instanceof TimeoutError) { logger.warn( - `Timed out waiting for callbacks in webhook CloudWatch log group (messageId=${messageId}, callbackType=${callbackType}, expectedCount=${expectedCount}, timeoutMs=${CALLBACK_WAIT_TIMEOUT_MS})`, + `Timed out waiting for callbacks (expected=${expectedTotal}, found=${matched.length})`, ); } else { throw error; } } - if (callbacks.length !== expectedCount) { + if (matched.length !== expectedTotal) { + const foundIds = new Set( + matched.map( + (cb) => + (cb.payload.attributes as { messageId?: string }).messageId ?? 
"", + ), + ); + const missingIds = messageIds.filter((id) => !foundIds.has(id)); + logger.warn("Missing callbacks", { + callbackType, + expectedTotal, + foundCount: matched.length, + missingIds, + }); throw new Error( - `Expected exactly ${expectedCount} callbacks for messageId="${messageId}" callbackType="${callbackType}", but found ${callbacks.length}`, + `Expected ${expectedTotal} callback(s) for type="${callbackType}", found ${matched.length}`, ); } - return callbacks; + return matched; } -type EmfEntry = Record; - -function collectMetricNamesFromEvent( - message: string, - metricNames: string[], - found: Set, -): void { - try { - const entry = JSON.parse(message) as EmfEntry; - if (entry._aws) { - for (const name of metricNames) { - if (name in entry) found.add(name); - } - } - } catch { - // skip unparseable entries - } +export async function awaitCallback( + client: CloudWatchLogsClient, + logGroupName: string, + messageId: string, + callbackType: CallbackItem["type"], + startTime: number, +): Promise { + const [callback] = await pollCallbacks( + client, + logGroupName, + [messageId], + callbackType, + 1, + startTime, + ); + return callback; } -async function queryEmfMetricsFromLogGroup( +export async function awaitCallbacks( client: CloudWatchLogsClient, logGroupName: string, - metricNames: string[], + messageIds: string[], + callbackType: CallbackItem["type"], + expectedPerMessage: number, startTime: number, -): Promise> { - const queryStartTime = Math.max(0, startTime - CLOUDWATCH_QUERY_LOOKBACK_MS); - const conditions = metricNames.map((name) => `$.${name} > 0`).join(" || "); - const filterPattern = `{ ${conditions} }`; - - const response = await client.send( - new FilterLogEventsCommand({ - logGroupName, - startTime: queryStartTime, - filterPattern, - }), +): Promise> { + const results = await pollCallbacks( + client, + logGroupName, + messageIds, + callbackType, + expectedPerMessage, + startTime, ); - const found = new Set(); - for (const event of 
response.events ?? []) { - if (event.message) { - collectMetricNamesFromEvent(event.message, metricNames, found); - } + const map = new Map(); + for (const cb of results) { + const id = + (cb.payload.attributes as { messageId?: string }).messageId ?? ""; + const existing = map.get(id) ?? []; + existing.push(cb); + map.set(id, existing); } - return found; + return map; } -export async function awaitAllEmfMetricsInLogGroup( +export async function awaitEmfMetrics( client: CloudWatchLogsClient, logGroupName: string, metricNames: string[], startTime: number, ): Promise { - const queryStartTime = Math.max(0, startTime - CLOUDWATCH_QUERY_LOOKBACK_MS); - const queryStartTimeIso = new Date(queryStartTime).toISOString(); - const startTimeIso = new Date(startTime).toISOString(); + const queryStartTime = Math.max(0, startTime - LOOKBACK_MS); + const conditions = metricNames.map((name) => `$.${name} > 0`).join(" || "); + const filterPattern = `{ ${conditions} }`; + logger.debug( - `Waiting for EMF metrics in CloudWatch log group (metrics=${metricNames.join(",")}, logGroup=${logGroupName}, startTimeIso=${startTimeIso}, queryStartTimeIso=${queryStartTimeIso}, lookbackMs=${CLOUDWATCH_QUERY_LOOKBACK_MS})`, + `Waiting for EMF metrics [${metricNames.join(", ")}] in ${logGroupName}`, ); await waitUntil( async () => { - const found = await queryEmfMetricsFromLogGroup( - client, - logGroupName, - metricNames, - startTime, + const response = await client.send( + new FilterLogEventsCommand({ + logGroupName, + startTime: queryStartTime, + filterPattern, + }), ); + + const found = new Set(); + for (const event of response.events ?? []) { + try { + const entry = JSON.parse(event.message ?? 
"") as Record< + string, + unknown + >; + if (entry._aws) { + for (const name of metricNames) { + if (name in entry) found.add(name); + } + } + } catch { + // skip unparseable entries + } + } return metricNames.every((name) => found.has(name)); }, - { - timeout: METRICS_WAIT_TIMEOUT_MS, - intervalBetweenAttempts: POLL_INTERVAL_MS, - }, + { timeout: WAIT_TIMEOUT_MS, intervalBetweenAttempts: POLL_INTERVAL_MS }, ); } + +export async function countLogEntries( + client: CloudWatchLogsClient, + logGroupName: string, + filterPattern: string, + startTime: number, + minCount: number, +): Promise { + const queryStartTime = Math.max(0, startTime - LOOKBACK_MS); + + let count = 0; + try { + await waitUntil( + async () => { + const response = await client.send( + new FilterLogEventsCommand({ + logGroupName, + startTime: queryStartTime, + filterPattern, + }), + ); + count = (response.events ?? []).length; + return count >= minCount; + }, + { timeout: WAIT_TIMEOUT_MS, intervalBetweenAttempts: POLL_INTERVAL_MS }, + ); + } catch (error) { + if (!(error instanceof TimeoutError)) { + throw error; + } + } + + return count; +} diff --git a/tests/integration/helpers/mock-client-config.ts b/tests/integration/helpers/mock-client-config.ts index eb94974c..52bb3570 100644 --- a/tests/integration/helpers/mock-client-config.ts +++ b/tests/integration/helpers/mock-client-config.ts @@ -1,6 +1,6 @@ import { readFileSync } from "node:fs"; import path from "node:path"; -import type seedConfigJson from "../fixtures/subscriptions/mock-client-1.json"; +import type seedConfigJson from "../fixtures/subscriptions/mock-client-single-target.json"; type ClientFixtureShape = typeof seedConfigJson; @@ -10,28 +10,40 @@ export type MockItClientConfig = ClientFixtureShape & { }; export const CLIENT_FIXTURES = { - client1: { - fixture: "mock-client-1.json", + clientSingleTarget: { + fixture: "mock-client-single-target.json", apiKeyVar: "MOCK_CLIENT_API_KEY", applicationIdVar: "MOCK_CLIENT_APPLICATION_ID", }, - 
client2: { - fixture: "mock-client-2.json", - apiKeyVar: "MOCK_CLIENT_2_API_KEY", - applicationIdVar: "MOCK_CLIENT_2_APPLICATION_ID", + clientFanOut: { + fixture: "mock-client-fan-out.json", + apiKeyVar: "MOCK_CLIENT_FAN_OUT_API_KEY", + applicationIdVar: "MOCK_CLIENT_FAN_OUT_APPLICATION_ID", + }, + clientMtls: { + fixture: "mock-client-mtls.json", + apiKeyVar: "MOCK_CLIENT_MTLS_API_KEY", + applicationIdVar: "MOCK_CLIENT_MTLS_APPLICATION_ID", + }, + clientRateLimit: { + fixture: "mock-client-rate-limit.json", + apiKeyVar: "MOCK_CLIENT_RATE_LIMIT_API_KEY", + applicationIdVar: "MOCK_CLIENT_RATE_LIMIT_APPLICATION_ID", + }, + clientCircuitBreaker: { + fixture: "mock-client-circuit-breaker.json", + apiKeyVar: "MOCK_CLIENT_CIRCUIT_BREAKER_API_KEY", + applicationIdVar: "MOCK_CLIENT_CIRCUIT_BREAKER_APPLICATION_ID", + }, + clientShortRetry: { + fixture: "mock-client-short-retry.json", + apiKeyVar: "MOCK_CLIENT_SHORT_RETRY_API_KEY", + applicationIdVar: "MOCK_CLIENT_SHORT_RETRY_APPLICATION_ID", }, } as const; export type ClientFixtureKey = keyof typeof CLIENT_FIXTURES; -const ALL_CLIENT_FIXTURE_KEYS = Object.keys( - CLIENT_FIXTURES, -) as ClientFixtureKey[]; - -function dedupe(values: string[]): string[] { - return [...new Set(values)]; -} - export function getClientConfig(key: ClientFixtureKey): MockItClientConfig { // eslint-disable-next-line security/detect-object-injection -- key is constrained to ClientFixtureKey, a keyof the hardcoded as-const CLIENT_FIXTURES object const { apiKeyVar, applicationIdVar, fixture } = CLIENT_FIXTURES[key]; @@ -48,11 +60,7 @@ export function getClientConfig(key: ClientFixtureKey): MockItClientConfig { } export function getMockItClientConfig(): MockItClientConfig { - return getClientConfig("client1"); -} - -export function getMockItClient2Config(): MockItClientConfig { - return getClientConfig("client2"); + return getClientConfig("clientSingleTarget"); } function buildWebhookTargetPaths(key: ClientFixtureKey): string[] { @@ -61,7 +69,7 @@ 
function buildWebhookTargetPaths(key: ClientFixtureKey): string[] { } export function buildMockWebhookTargetPath( - key: ClientFixtureKey = "client1", + key: ClientFixtureKey = "clientSingleTarget", ): string { const paths = buildWebhookTargetPaths(key); @@ -73,22 +81,7 @@ export function buildMockWebhookTargetPath( } export function buildMockWebhookTargetPaths( - key: ClientFixtureKey = "client1", + key: ClientFixtureKey = "clientSingleTarget", ): string[] { return buildWebhookTargetPaths(key); } - -export function getSubscriptionTargetIds( - key: ClientFixtureKey = "client1", -): string[] { - const config = getClientConfig(key); - return dedupe( - config.subscriptions.flatMap((subscription) => subscription.targetIds), - ); -} - -export function getAllSubscriptionTargetIds( - keys: ClientFixtureKey[] = ALL_CLIENT_FIXTURE_KEYS, -): string[] { - return dedupe(keys.flatMap((key) => getSubscriptionTargetIds(key))); -} diff --git a/tests/integration/helpers/sqs.ts b/tests/integration/helpers/sqs.ts index 857fd3a7..5cdcc3a9 100644 --- a/tests/integration/helpers/sqs.ts +++ b/tests/integration/helpers/sqs.ts @@ -1,5 +1,6 @@ import { ChangeMessageVisibilityCommand, + DeleteMessageCommand, GetQueueAttributesCommand, type Message, PurgeQueueCommand, @@ -46,13 +47,16 @@ function buildQueueUrl( export function buildMockClientDlqQueueUrl( deploymentDetails: DeploymentDetails, - targets: { targetId: string }[], + clientId: string, ): string { - const [firstTarget] = targets; - if (!firstTarget) { - throw new Error("At least one target is required to build DLQ URL"); - } - return buildQueueUrl(deploymentDetails, `${firstTarget.targetId}-dlq`); + return buildQueueUrl(deploymentDetails, `${clientId}-delivery-dlq`); +} + +export function buildMockClientDeliveryQueueUrl( + deploymentDetails: DeploymentDetails, + clientId: string, +): string { + return buildQueueUrl(deploymentDetails, `${clientId}-delivery`); } export async function sendSqsEvent( @@ -165,6 +169,7 @@ async function 
receiveOneMessage(client: SQSClient, queueUrl: string) { export async function awaitQueueMessage( client: SQSClient, queueUrl: string, + timeoutMs: number = QUEUE_WAIT_TIMEOUT_MS, ): Promise { let message: Message | undefined; @@ -176,13 +181,13 @@ export async function awaitQueueMessage( }, { intervalBetweenAttempts: POLL_INTERVAL_MS, - timeout: QUEUE_WAIT_TIMEOUT_MS, + timeout: timeoutMs, }, ); if (!message) { throw new Error( - `Timed out after ${QUEUE_WAIT_TIMEOUT_MS}ms waiting for a message to appear in ${queueUrl}`, + `Timed out after ${timeoutMs}ms waiting for a message to appear in ${queueUrl}`, ); } @@ -246,3 +251,26 @@ export async function awaitQueueMessageByMessageId( return matchedMessage; } + +export async function deleteMessage( + client: SQSClient, + queueUrl: string, + message: Message, +): Promise { + await client.send( + new DeleteMessageCommand({ + QueueUrl: queueUrl, + ReceiptHandle: message.ReceiptHandle!, + }), + ); +} + +export async function getQueueDepth( + client: SQSClient, + queueUrl: string, +): Promise { + return getQueueMessageCount(client, queueUrl, [ + "ApproximateNumberOfMessages", + "ApproximateNumberOfMessagesNotVisible", + ]); +} diff --git a/tests/integration/helpers/status-events.ts b/tests/integration/helpers/status-events.ts deleted file mode 100644 index 1bccf0bb..00000000 --- a/tests/integration/helpers/status-events.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; -import { SQSClient } from "@aws-sdk/client-sqs"; -import type { - ChannelStatusData, - MessageStatusData, - StatusPublishEvent, -} from "@nhs-notify-client-callbacks/models"; - -import { - type SignedCallback, - awaitSignedCallbacksFromWebhookLogGroup, -} from "./cloudwatch"; -import { ensureInboundQueueIsEmpty, sendSqsEvent } from "./sqs"; - -async function processStatusEvent< - T extends MessageStatusData | ChannelStatusData, ->( - { - CloudWatchLogsClient: cloudWatchClient, - SQSClient: sqsClient, - 
}: { CloudWatchLogsClient: CloudWatchLogsClient; SQSClient: SQSClient }, - callbackEventQueueUrl: string, - webhookLogGroupName: string, - event: StatusPublishEvent, - callbackType: SignedCallback["payload"]["type"], - webhookPath: string, - startTime: number, -): Promise { - const sendMessageResponse = await sendSqsEvent( - sqsClient, - callbackEventQueueUrl, - event, - ); - - if (!sendMessageResponse.MessageId) { - throw new Error("Expected SQS send response to include MessageId"); - } - - await ensureInboundQueueIsEmpty(sqsClient, callbackEventQueueUrl); - - return awaitSignedCallbacksFromWebhookLogGroup( - cloudWatchClient, - webhookLogGroupName, - event.data.messageId, - callbackType, - startTime, - webhookPath, - ); -} - -export async function processMessageStatusEvent( - sqsClient: SQSClient, - cloudWatchClient: CloudWatchLogsClient, - callbackEventQueueUrl: string, - webhookLogGroupName: string, - messageStatusEvent: StatusPublishEvent, - webhookPath: string, - startTime: number, -): Promise { - return processStatusEvent( - { CloudWatchLogsClient: cloudWatchClient, SQSClient: sqsClient }, - callbackEventQueueUrl, - webhookLogGroupName, - messageStatusEvent, - "MessageStatus", - webhookPath, - startTime, - ); -} - -export async function processChannelStatusEvent( - sqsClient: SQSClient, - cloudWatchClient: CloudWatchLogsClient, - callbackEventQueueUrl: string, - webhookLogGroupName: string, - channelStatusEvent: StatusPublishEvent, - webhookPath: string, - startTime: number, -): Promise { - return processStatusEvent( - { CloudWatchLogsClient: cloudWatchClient, SQSClient: sqsClient }, - callbackEventQueueUrl, - webhookLogGroupName, - channelStatusEvent, - "ChannelStatus", - webhookPath, - startTime, - ); -} diff --git a/tests/integration/helpers/test-context.ts b/tests/integration/helpers/test-context.ts new file mode 100644 index 00000000..df5a31f5 --- /dev/null +++ b/tests/integration/helpers/test-context.ts @@ -0,0 +1,52 @@ +import type { 
CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; +import type { SQSClient } from "@aws-sdk/client-sqs"; +import type { DeploymentDetails } from "@nhs-notify-client-callbacks/test-support/helpers"; +import { + buildInboundEventDlqQueueUrl, + buildInboundEventQueueUrl, + buildLambdaLogGroupName, + createCloudWatchLogsClient, + createSqsClient, + getDeploymentDetails, +} from "@nhs-notify-client-callbacks/test-support/helpers"; +import { + buildMockClientDeliveryQueueUrl, + buildMockClientDlqQueueUrl, +} from "./sqs"; + +export type TestContext = { + sqs: SQSClient; + cwLogs: CloudWatchLogsClient; + deployment: DeploymentDetails; + inboundQueueUrl: string; + inboundDlqUrl: string; + webhookLogGroup: string; + startTime: number; + clientDlqUrl(clientId: string): string; + clientDeliveryUrl(clientId: string): string; + logGroup(name: string): string; +}; + +export function createTestContext(): TestContext { + const deployment = getDeploymentDetails(); + + return { + sqs: createSqsClient(deployment), + cwLogs: createCloudWatchLogsClient(deployment), + deployment, + inboundQueueUrl: buildInboundEventQueueUrl(deployment), + inboundDlqUrl: buildInboundEventDlqQueueUrl(deployment), + webhookLogGroup: buildLambdaLogGroupName(deployment, "mock-webhook"), + startTime: Date.now(), + clientDlqUrl: (clientId) => + buildMockClientDlqQueueUrl(deployment, clientId), + clientDeliveryUrl: (clientId) => + buildMockClientDeliveryQueueUrl(deployment, clientId), + logGroup: (name) => buildLambdaLogGroupName(deployment, name), + }; +} + +export function destroyTestContext(ctx: TestContext): void { + ctx.sqs.destroy(); + ctx.cwLogs.destroy(); +} diff --git a/tests/integration/inbound-sqs-to-webhook.test.ts b/tests/integration/inbound-sqs-to-webhook.test.ts index 4305f05e..13d8ec14 100644 --- a/tests/integration/inbound-sqs-to-webhook.test.ts +++ b/tests/integration/inbound-sqs-to-webhook.test.ts @@ -1,19 +1,9 @@ -import { CloudWatchLogsClient } from 
"@aws-sdk/client-cloudwatch-logs"; -import { DeleteMessageCommand, SQSClient } from "@aws-sdk/client-sqs"; -import { - type ChannelStatusData, - type MessageStatusData, - type StatusPublishEvent, +import type { + ChannelStatusData, + MessageStatusData, + StatusPublishEvent, } from "@nhs-notify-client-callbacks/models"; -import { - buildInboundEventDlqQueueUrl, - buildInboundEventQueueUrl, - buildLambdaLogGroupName, - createCloudWatchLogsClient, - createSqsClient, - getDeploymentDetails, -} from "@nhs-notify-client-callbacks/test-support/helpers"; -import { awaitSignedCallbacksByCountFromWebhookLogGroup } from "./helpers/cloudwatch"; +import { awaitCallback, awaitCallbacks } from "./helpers/cloudwatch"; import { createChannelStatusPublishEvent, createMessageStatusPublishEvent, @@ -21,143 +11,112 @@ import { import { buildMockWebhookTargetPath, buildMockWebhookTargetPaths, - getMockItClient2Config, - getMockItClientConfig, + getClientConfig, } from "./helpers/mock-client-config"; import { assertCallbackHeaders } from "./helpers/signature"; import { awaitQueueMessage, awaitQueueMessageByMessageId, - buildMockClientDlqQueueUrl, + deleteMessage, ensureInboundQueueIsEmpty, purgeQueues, sendSqsEvent, } from "./helpers/sqs"; import { - processChannelStatusEvent, - processMessageStatusEvent, -} from "./helpers/status-events"; - -function compareStrings(a: string, b: string): number { - if (a > b) return 1; - if (a < b) return -1; - return 0; -} + type TestContext, + createTestContext, + destroyTestContext, +} from "./helpers/test-context"; describe("SQS to Webhook Integration", () => { - let sqsClient: SQSClient; - let cloudWatchClient: CloudWatchLogsClient; - let callbackEventQueueUrl: string; - let clientDlqQueueUrl: string; - let inboundEventDlqQueueUrl: string; - let webhookLogGroupName: string; - let webhookTargetPath: string; - let startTime: number; + let ctx: TestContext; + let clientDlqUrl: string; + let clientDeliveryUrl: string; beforeAll(async () => { - const 
deploymentDetails = getDeploymentDetails(); - const { targets } = getMockItClientConfig(); - - sqsClient = createSqsClient(deploymentDetails); - cloudWatchClient = createCloudWatchLogsClient(deploymentDetails); - callbackEventQueueUrl = buildInboundEventQueueUrl(deploymentDetails); - clientDlqQueueUrl = buildMockClientDlqQueueUrl(deploymentDetails, targets); - inboundEventDlqQueueUrl = buildInboundEventDlqQueueUrl(deploymentDetails); - webhookLogGroupName = buildLambdaLogGroupName( - deploymentDetails, - "mock-webhook", - ); - webhookTargetPath = buildMockWebhookTargetPath(); - startTime = Date.now(); - await purgeQueues(sqsClient, [ - inboundEventDlqQueueUrl, - clientDlqQueueUrl, - callbackEventQueueUrl, + ctx = createTestContext(); + const { clientId } = getClientConfig("clientSingleTarget"); + clientDlqUrl = ctx.clientDlqUrl(clientId); + clientDeliveryUrl = ctx.clientDeliveryUrl(clientId); + await purgeQueues(ctx.sqs, [ + ctx.inboundDlqUrl, + clientDlqUrl, + clientDeliveryUrl, + ctx.inboundQueueUrl, ]); }); afterAll(async () => { - await purgeQueues(sqsClient, [ - inboundEventDlqQueueUrl, - clientDlqQueueUrl, - callbackEventQueueUrl, + await purgeQueues(ctx.sqs, [ + ctx.inboundDlqUrl, + clientDlqUrl, + clientDeliveryUrl, + ctx.inboundQueueUrl, ]); - - sqsClient.destroy(); - cloudWatchClient.destroy(); + destroyTestContext(ctx); }); describe("Message Status Event Flow", () => { it("should process message status event from SQS to webhook", async () => { - const messageStatusEvent: StatusPublishEvent = - createMessageStatusPublishEvent(); - - const callbacks = await processMessageStatusEvent( - sqsClient, - cloudWatchClient, - callbackEventQueueUrl, - webhookLogGroupName, - messageStatusEvent, - webhookTargetPath, - startTime, - ); + const event = createMessageStatusPublishEvent(); - expect(callbacks).toHaveLength(1); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); - 
expect(callbacks[0].payload).toMatchObject({ - type: "MessageStatus", + const callback = await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, + event.data.messageId, + "MessageStatus", + ctx.startTime, + ); - attributes: expect.objectContaining({ - messageStatus: "delivered", - }), + expect(callback.payload).toMatchObject({ + type: "MessageStatus", + attributes: expect.objectContaining({ messageStatus: "delivered" }), }); - - assertCallbackHeaders(callbacks[0]); + assertCallbackHeaders(callback); }, 120_000); it("should fan out a message status event to subscription with multiple target endpoints", async () => { - const client2Config = getMockItClient2Config(); - const expectedPaths = buildMockWebhookTargetPaths("client2"); + const fanOutConfig = getClientConfig("clientFanOut"); + const expectedPaths = buildMockWebhookTargetPaths("clientFanOut"); - const messageStatusEvent: StatusPublishEvent = + const event: StatusPublishEvent = createMessageStatusPublishEvent({ - data: { - clientId: client2Config.clientId, - }, + data: { clientId: fanOutConfig.clientId }, }); - await sendSqsEvent(sqsClient, callbackEventQueueUrl, messageStatusEvent); - await ensureInboundQueueIsEmpty(sqsClient, callbackEventQueueUrl); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); - const callbacks = await awaitSignedCallbacksByCountFromWebhookLogGroup( - cloudWatchClient, - webhookLogGroupName, - messageStatusEvent.data.messageId, + const callbackMap = await awaitCallbacks( + ctx.cwLogs, + ctx.webhookLogGroup, + [event.data.messageId], "MessageStatus", expectedPaths.length, - startTime, + ctx.startTime, ); - expect(callbacks).toHaveLength(expectedPaths.length); + const callbacks = callbackMap.get(event.data.messageId)!; - const actualPaths = callbacks - .map((callback) => callback.path) - .toSorted(compareStrings); - expect(actualPaths).toEqual(expectedPaths.toSorted(compareStrings)); + const paths = 
callbacks.map((cb) => cb.path); + expect(paths).toHaveLength(expectedPaths.length); + expect(paths).toEqual(expect.arrayContaining(expectedPaths)); for (const callback of callbacks) { expect(callback.payload).toMatchObject({ type: "MessageStatus", attributes: expect.objectContaining({ - messageId: messageStatusEvent.data.messageId, + messageId: event.data.messageId, messageStatus: "delivered", }), }); - assertCallbackHeaders( callback, - client2Config.apiKeyVar, - client2Config.applicationIdVar, + fanOutConfig.apiKeyVar, + fanOutConfig.applicationIdVar, ); } }, 120_000); @@ -165,86 +124,73 @@ describe("SQS to Webhook Integration", () => { describe("Channel Status Event Flow", () => { it("should process channel status event from SQS to webhook", async () => { - const channelStatusEvent: StatusPublishEvent = + const event: StatusPublishEvent = createChannelStatusPublishEvent(); - const callbacks = await processChannelStatusEvent( - sqsClient, - cloudWatchClient, - callbackEventQueueUrl, - webhookLogGroupName, - channelStatusEvent, - webhookTargetPath, - startTime, - ); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); - expect(callbacks).toHaveLength(1); + const callback = await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, + event.data.messageId, + "ChannelStatus", + ctx.startTime, + ); - expect(callbacks[0].payload).toMatchObject({ + expect(callback.payload).toMatchObject({ type: "ChannelStatus", attributes: expect.objectContaining({ channel: "nhsapp", channelStatus: "delivered", supplierStatus: "delivered", - messageId: channelStatusEvent.data.messageId, + messageId: event.data.messageId, }), }); - - assertCallbackHeaders(callbacks[0]); + assertCallbackHeaders(callback); }, 120_000); }); describe("Client Webhook DLQ", () => { - it("should route a non-retriable (4xx) webhook response to the per-target DLQ", async () => { + it("should route a non-retriable (4xx) webhook response to the 
per-client DLQ", async () => { const event: StatusPublishEvent = createMessageStatusPublishEvent({ - data: { - messageId: `force-400-${Date.now()}`, - }, + data: { messageId: `force-400-${crypto.randomUUID()}` }, }); - await sendSqsEvent(sqsClient, callbackEventQueueUrl, event); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); - const dlqMessage = await awaitQueueMessage(sqsClient, clientDlqQueueUrl); + const dlqMessage = await awaitQueueMessage(ctx.sqs, clientDlqUrl); expect(dlqMessage.Body).toBeDefined(); expect(dlqMessage.MessageAttributes?.ERROR_CODE?.StringValue).toBe( - "INVALID_PARAMETER", + "HTTP_CLIENT_ERROR", ); expect( dlqMessage.MessageAttributes?.ERROR_MESSAGE?.StringValue, ).toContain("Forced status 400"); - await sqsClient.send( - new DeleteMessageCommand({ - QueueUrl: clientDlqQueueUrl, - ReceiptHandle: dlqMessage.ReceiptHandle!, - }), - ); + await deleteMessage(ctx.sqs, clientDlqUrl, dlqMessage); }, 120_000); }); describe("Inbound Event DLQ", () => { it("should move an invalid inbound event to the inbound-event DLQ when schema validation fails", async () => { - const messageId = `invalid-schema-${Date.now()}`; + const messageId = `invalid-schema-${crypto.randomUUID()}`; const invalidEvent = createMessageStatusPublishEvent({ data: { messageId, - channels: [ - // @ts-expect-error - intentionally invalid for schema-failure DLQ path - { - channelStatus: "DELIVERED", - }, - ], + // @ts-expect-error - intentionally invalid for schema-failure DLQ path + channels: [{ channelStatus: "DELIVERED" }], }, }); - await sendSqsEvent(sqsClient, callbackEventQueueUrl, invalidEvent); - await ensureInboundQueueIsEmpty(sqsClient, callbackEventQueueUrl); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, invalidEvent); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); const dlqMessage = await awaitQueueMessageByMessageId( - sqsClient, - inboundEventDlqQueueUrl, + ctx.sqs, + ctx.inboundDlqUrl, messageId, ); @@ -252,11 +198,43 @@ describe("SQS to 
Webhook Integration", () => { const dlqPayload = JSON.parse(dlqMessage.Body as string); expect(dlqPayload.data.messageId).toBe(messageId); - await sqsClient.send( - new DeleteMessageCommand({ - QueueUrl: inboundEventDlqQueueUrl, - ReceiptHandle: dlqMessage.ReceiptHandle!, + await deleteMessage(ctx.sqs, ctx.inboundDlqUrl, dlqMessage); + }, 120_000); + }); + + describe("mTLS Delivery", () => { + it("should deliver a callback via mTLS to the mTLS-secured mock webhook", async () => { + const mtlsConfig = getClientConfig("clientMtls"); + + const event: StatusPublishEvent = + createMessageStatusPublishEvent({ + data: { clientId: mtlsConfig.clientId }, + }); + + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); + + const callback = await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, + event.data.messageId, + "MessageStatus", + ctx.startTime, + ); + + expect(callback.path).toBe(buildMockWebhookTargetPath("clientMtls")); + expect(callback.isMtls).toBe(true); + expect(callback.payload).toMatchObject({ + type: "MessageStatus", + attributes: expect.objectContaining({ + messageId: event.data.messageId, + messageStatus: "delivered", }), + }); + assertCallbackHeaders( + callback, + mtlsConfig.apiKeyVar, + mtlsConfig.applicationIdVar, ); }, 120_000); }); diff --git a/tests/integration/metrics.test.ts b/tests/integration/metrics.test.ts index 2f314f85..20e1dfb8 100644 --- a/tests/integration/metrics.test.ts +++ b/tests/integration/metrics.test.ts @@ -1,77 +1,50 @@ -import { DeleteMessageCommand, SQSClient } from "@aws-sdk/client-sqs"; -import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; import type { MessageStatusData, StatusPublishEvent, } from "@nhs-notify-client-callbacks/models"; +import { awaitCallback, awaitEmfMetrics } from "./helpers/cloudwatch"; +import { createMessageStatusPublishEvent } from "./helpers/event-factories"; +import { getClientConfig } from 
"./helpers/mock-client-config"; import { - buildInboundEventDlqQueueUrl, - buildInboundEventQueueUrl, - buildLambdaLogGroupName, - createCloudWatchLogsClient, - createSqsClient, - getDeploymentDetails, -} from "@nhs-notify-client-callbacks/test-support/helpers"; -import { + awaitQueueMessage, awaitQueueMessageByMessageId, - buildMockClientDlqQueueUrl, + deleteMessage, ensureInboundQueueIsEmpty, purgeQueues, sendSqsEvent, } from "./helpers/sqs"; import { - buildMockWebhookTargetPath, - getMockItClientConfig, -} from "./helpers/mock-client-config"; -import { - awaitAllEmfMetricsInLogGroup, - awaitSignedCallbacksFromWebhookLogGroup, -} from "./helpers/cloudwatch"; -import { createMessageStatusPublishEvent } from "./helpers/event-factories"; + type TestContext, + createTestContext, + destroyTestContext, +} from "./helpers/test-context"; describe("Metrics", () => { - let sqsClient: SQSClient; - let cloudWatchClient: CloudWatchLogsClient; - let callbackEventQueueUrl: string; - let clientDlqQueueUrl: string; - let inboundEventDlqQueueUrl: string; - let logGroupName: string; - let webhookLogGroupName: string; + let ctx: TestContext; + let clientDlqUrl: string; + let transformFilterLogGroup: string; beforeAll(async () => { - const deploymentDetails = getDeploymentDetails(); - const { targets } = getMockItClientConfig(); - - sqsClient = createSqsClient(deploymentDetails); - cloudWatchClient = createCloudWatchLogsClient(deploymentDetails); - callbackEventQueueUrl = buildInboundEventQueueUrl(deploymentDetails); - clientDlqQueueUrl = buildMockClientDlqQueueUrl(deploymentDetails, targets); - inboundEventDlqQueueUrl = buildInboundEventDlqQueueUrl(deploymentDetails); - logGroupName = buildLambdaLogGroupName( - deploymentDetails, - "client-transform-filter", - ); - webhookLogGroupName = buildLambdaLogGroupName( - deploymentDetails, - "mock-webhook", - ); - - await purgeQueues(sqsClient, [ - inboundEventDlqQueueUrl, - clientDlqQueueUrl, - callbackEventQueueUrl, + ctx = 
createTestContext(); + const { clientId } = getClientConfig("clientSingleTarget"); + + clientDlqUrl = ctx.clientDlqUrl(clientId); + transformFilterLogGroup = ctx.logGroup("client-transform-filter"); + + await purgeQueues(ctx.sqs, [ + ctx.inboundDlqUrl, + clientDlqUrl, + ctx.inboundQueueUrl, ]); }); afterAll(async () => { - await purgeQueues(sqsClient, [ - inboundEventDlqQueueUrl, - clientDlqQueueUrl, - callbackEventQueueUrl, + await purgeQueues(ctx.sqs, [ + ctx.inboundDlqUrl, + clientDlqUrl, + ctx.inboundQueueUrl, ]); - - sqsClient.destroy(); - cloudWatchClient.destroy(); + destroyTestContext(ctx); }); describe("Successful event processing", () => { @@ -79,40 +52,38 @@ describe("Metrics", () => { const startTime = Date.now(); const event = createMessageStatusPublishEvent(); - await sendSqsEvent(sqsClient, callbackEventQueueUrl, event); - await ensureInboundQueueIsEmpty(sqsClient, callbackEventQueueUrl); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); - // Wait for signed callback log to confirm the invocation completed before checking metrics - const callbacks = await awaitSignedCallbacksFromWebhookLogGroup( - cloudWatchClient, - webhookLogGroupName, + await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, event.data.messageId, "MessageStatus", startTime, - buildMockWebhookTargetPath(), ); - expect(callbacks.length).toBeGreaterThan(0); - - await awaitAllEmfMetricsInLogGroup( - cloudWatchClient, - logGroupName, - [ - "EventsReceived", - "TransformationsSuccessful", - "FilteringStarted", - "FilteringMatched", - "CallbacksInitiated", - ], - startTime, - ); + await expect( + awaitEmfMetrics( + ctx.cwLogs, + transformFilterLogGroup, + [ + "EventsReceived", + "TransformationsSuccessful", + "FilteringStarted", + "FilteringMatched", + "CallbacksInitiated", + ], + startTime, + ), + ).resolves.toBeUndefined(); }, 120_000); }); describe("Validation error", () => { it("should emit ValidationErrors 
metric when an invalid event fails schema validation", async () => { const startTime = Date.now(); - const messageId = `invalid-schema-metrics-${Date.now()}`; + const messageId = `invalid-schema-metrics-${crypto.randomUUID()}`; const invalidEvent: StatusPublishEvent = createMessageStatusPublishEvent({ data: { @@ -122,30 +93,83 @@ describe("Metrics", () => { }, }); - await sendSqsEvent(sqsClient, callbackEventQueueUrl, invalidEvent); + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, invalidEvent); - // Wait for the event to land on the DLQ, confirming the Lambda ran and failed validation const dlqMessage = await awaitQueueMessageByMessageId( - sqsClient, - inboundEventDlqQueueUrl, + ctx.sqs, + ctx.inboundDlqUrl, messageId, ); expect(dlqMessage.Body).toBeDefined(); + await deleteMessage(ctx.sqs, ctx.inboundDlqUrl, dlqMessage); - await sqsClient.send( - new DeleteMessageCommand({ - QueueUrl: inboundEventDlqQueueUrl, - ReceiptHandle: dlqMessage.ReceiptHandle!, - }), + await awaitEmfMetrics( + ctx.cwLogs, + transformFilterLogGroup, + ["EventsReceived", "ValidationErrors"], + startTime, ); + }, 120_000); + }); - await awaitAllEmfMetricsInLogGroup( - cloudWatchClient, - logGroupName, - ["EventsReceived", "ValidationErrors"], + describe("HTTPS Client Lambda metrics", () => { + let httpsClientLogGroup: string; + + beforeAll(() => { + const { clientId } = getClientConfig("clientSingleTarget"); + httpsClientLogGroup = ctx.logGroup(`https-client-${clientId}`); + }); + + it("should emit DeliveryAttempt, DeliverySuccess and DeliveryDurationMs on successful delivery", async () => { + const startTime = Date.now(); + const event = createMessageStatusPublishEvent(); + + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + await ensureInboundQueueIsEmpty(ctx.sqs, ctx.inboundQueueUrl); + + await awaitCallback( + ctx.cwLogs, + ctx.webhookLogGroup, + event.data.messageId, + "MessageStatus", startTime, ); + + await expect( + awaitEmfMetrics( + ctx.cwLogs, + httpsClientLogGroup, + 
["DeliveryAttempt", "DeliverySuccess", "DeliveryDurationMs"], + startTime, + ), + ).resolves.toBeUndefined(); + }, 120_000); + + it("should emit DeliveryAttempt, DeliveryPermanentFailure and DeliveryDurationMs on 4xx response", async () => { + const startTime = Date.now(); + const messageId = `force-400-metrics-${crypto.randomUUID()}`; + + const event: StatusPublishEvent = + createMessageStatusPublishEvent({ + data: { messageId }, + }); + + await sendSqsEvent(ctx.sqs, ctx.inboundQueueUrl, event); + + const dlqMessage = await awaitQueueMessage(ctx.sqs, clientDlqUrl, 90_000); + + expect(dlqMessage.Body).toBeDefined(); + await deleteMessage(ctx.sqs, clientDlqUrl, dlqMessage); + + await expect( + awaitEmfMetrics( + ctx.cwLogs, + httpsClientLogGroup, + ["DeliveryAttempt", "DeliveryPermanentFailure", "DeliveryDurationMs"], + startTime, + ), + ).resolves.toBeUndefined(); }, 120_000); }); }); diff --git a/tests/performance/README.md b/tests/performance/README.md deleted file mode 100644 index 9a44eccf..00000000 --- a/tests/performance/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# performance - -Load tests for the client-callbacks service. These tests run against a real deployed AWS environment — they are not unit tests and cannot run locally without a live stack. - -## Prerequisites - -- AWS credentials configured for the target environment -- The service deployed to the target environment - -## Environment Variables - -| Variable | Required | Default | Description | -| --- | --- | --- | --- | -| `ENVIRONMENT` | Yes | — | Target environment name (e.g. 
`dev`) | -| `AWS_ACCOUNT_ID` | Yes | — | AWS account ID for the target environment | -| `AWS_REGION` | No | `eu-west-2` | AWS region | -| `PROJECT` | No | `nhs` | Project name prefix used in resource naming | -| `COMPONENT` | No | `callbacks` | Component name used in resource naming | - -## Running - -From the repository root: - -```bash -ENVIRONMENT=dev AWS_ACCOUNT_ID=123456789012 npm run test:performance --workspace tests/performance -``` - -## What the Tests Do - -The load test sends ~3,000 events/s to the SQS inbound queue for 30 seconds, then reads CloudWatch Logs to assert that the p95 Lambda processing time is below 500ms. - -The global teardown removes the test client subscription config from S3. diff --git a/tests/performance/fixtures/subscriptions/perf-client-1.json b/tests/performance/fixtures/subscriptions/perf-client-1.json new file mode 100644 index 00000000..1c730b8a --- /dev/null +++ b/tests/performance/fixtures/subscriptions/perf-client-1.json @@ -0,0 +1,44 @@ +{ + "clientId": "perf-client-1", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED", + "FAILED", + "SENDING", + "PENDING_ENRICHMENT", + "ENRICHED" + ], + "subscriptionId": "sub-451afe55-2c8f-4103-a5f7-7bcf79e8e476", + "subscriptionType": "MessageStatus", + "targetIds": [ + "target-39dbd795-5909-40ab-95b2-4e88b11a2813" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "circuitBreaker": { + "enabled": true + }, + "maxRetryDurationSeconds": 7200, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 300, + "targetId": "target-39dbd795-5909-40ab-95b2-4e88b11a2813", + "type": "API" + } + ] +} diff --git a/tests/performance/fixtures/subscriptions/perf-client-2.json b/tests/performance/fixtures/subscriptions/perf-client-2.json new file mode 100644 index 
00000000..d3c58a93 --- /dev/null +++ b/tests/performance/fixtures/subscriptions/perf-client-2.json @@ -0,0 +1,54 @@ +{ + "clientId": "perf-client-2", + "subscriptions": [ + { + "channelStatuses": [ + "DELIVERED", + "FAILED", + "RETRY", + "SKIPPED", + "SENDING", + "CREATED" + ], + "channelType": "NHSAPP", + "subscriptionId": "sub-ace58855-9f6b-4491-8cee-abb99d997ced", + "subscriptionType": "ChannelStatus", + "supplierStatuses": [ + "delivered", + "permanent_failure", + "temporary_failure", + "pending", + "sending", + "sent" + ], + "targetIds": [ + "target-e3ccc2c2-7b19-4475-80d5-51a1182d239a" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "circuitBreaker": { + "enabled": true + }, + "maxRetryDurationSeconds": 7200, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 300, + "targetId": "target-e3ccc2c2-7b19-4475-80d5-51a1182d239a", + "type": "API" + } + ] +} diff --git a/tests/performance/fixtures/subscriptions/perf-client-3.json b/tests/performance/fixtures/subscriptions/perf-client-3.json new file mode 100644 index 00000000..8034177b --- /dev/null +++ b/tests/performance/fixtures/subscriptions/perf-client-3.json @@ -0,0 +1,64 @@ +{ + "clientId": "perf-client-3", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED" + ], + "subscriptionId": "sub-72197a52-8f4a-4b9d-b074-90f51183b91c", + "subscriptionType": "MessageStatus", + "targetIds": [ + "target-9f81befc-8cd2-49d7-9972-40b11c932d80", + "target-42228749-1610-4862-bcbf-ffb5b3a6f7eb" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "circuitBreaker": { + "enabled": true + }, + "maxRetryDurationSeconds": 7200, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, 
+ "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 300, + "targetId": "target-9f81befc-8cd2-49d7-9972-40b11c932d80", + "type": "API" + }, + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "circuitBreaker": { + "enabled": true + }, + "maxRetryDurationSeconds": 7200, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 300, + "targetId": "target-42228749-1610-4862-bcbf-ffb5b3a6f7eb", + "type": "API" + } + ] +} diff --git a/tests/performance/fixtures/subscriptions/perf-client-4.json b/tests/performance/fixtures/subscriptions/perf-client-4.json new file mode 100644 index 00000000..b6c72346 --- /dev/null +++ b/tests/performance/fixtures/subscriptions/perf-client-4.json @@ -0,0 +1,57 @@ +{ + "clientId": "perf-client-4", + "subscriptions": [ + { + "messageStatuses": [ + "DELIVERED", + "FAILED" + ], + "subscriptionId": "sub-31908329-f6ce-4655-94a0-1ceb42073f13", + "subscriptionType": "MessageStatus", + "targetIds": [ + "target-11c2d19e-e8c9-4058-8175-546eabd1def2" + ] + }, + { + "channelStatuses": [ + "DELIVERED", + "FAILED" + ], + "channelType": "NHSAPP", + "subscriptionId": "sub-4f8a6b2c-d193-47e5-b860-7a9f3c1d2e4b", + "subscriptionType": "ChannelStatus", + "supplierStatuses": [ + "delivered", + "permanent_failure" + ], + "targetIds": [ + "target-11c2d19e-e8c9-4058-8175-546eabd1def2" + ] + } + ], + "targets": [ + { + "apiKey": { + "headerName": "x-api-key", + "headerValue": "REPLACED_BY_TERRAFORM" + }, + "delivery": { + "circuitBreaker": { + "enabled": true + }, + "maxRetryDurationSeconds": 7200, + "mtls": { + "certPinning": { + "enabled": false + }, + "enabled": false + } + }, + "invocationEndpoint": "https://REPLACED_BY_TERRAFORM", + "invocationMethod": "POST", + "invocationRateLimit": 300, + "targetId": 
"target-11c2d19e-e8c9-4058-8175-546eabd1def2", + "type": "API" + } + ] +} diff --git a/tests/performance/helpers/cloudwatch.ts b/tests/performance/helpers/cloudwatch.ts deleted file mode 100644 index 33772ba6..00000000 --- a/tests/performance/helpers/cloudwatch.ts +++ /dev/null @@ -1,156 +0,0 @@ -import { - CloudWatchLogsClient, - FilterLogEventsCommand, - GetQueryResultsCommand, - StartQueryCommand, -} from "@aws-sdk/client-cloudwatch-logs"; -import { waitUntil } from "async-wait-until"; - -const POLL_INTERVAL_MS = 2000; -const COLLECT_TIMEOUT_MS = 120_000; - -type BatchCompletedLogEntry = { - processingTimeMs: number; - batchSize: number; - successful: number; - failed: number; - filtered: number; -}; - -export async function collectBatchProcessingTimes( - client: CloudWatchLogsClient, - logGroupName: string, - expectedCount: number, - startTime: number, -): Promise { - const collected: number[] = []; - - await waitUntil( - async () => { - const response = await client.send( - new FilterLogEventsCommand({ - logGroupName, - startTime, - filterPattern: '{ $.msg = "batch-processing-completed" }', - }), - ); - - for (const event of response.events ?? 
[]) { - if (event.message) { - try { - const entry = JSON.parse(event.message) as BatchCompletedLogEntry; - if (typeof entry.processingTimeMs === "number") { - collected.push(entry.processingTimeMs); - } - } catch { - // skip unparseable entries - } - } - } - - return collected.length >= expectedCount; - }, - { timeout: COLLECT_TIMEOUT_MS, intervalBetweenAttempts: POLL_INTERVAL_MS }, - ); - - return collected; -} - -export function computePercentile( - samples: number[], - percentile: number, -): number { - if (samples.length === 0) { - throw new Error("Cannot compute percentile of empty array"); - } - - const sorted = [...samples].toSorted((a, b) => a - b); - const index = Math.ceil((percentile / 100) * sorted.length) - 1; - return sorted[Math.max(0, index)]; -} - -const INSIGHTS_QUERY_TIMEOUT_MS = 60_000; -const INSIGHTS_COLLECT_TIMEOUT_MS = 300_000; - -async function runInsightsQuery( - client: CloudWatchLogsClient, - logGroupName: string, - startTimeSec: number, - endTimeSec: number, - percentile: number, -): Promise<{ count: number; percentileMs: number } | null> { - const { queryId } = await client.send( - new StartQueryCommand({ - logGroupName, - startTime: startTimeSec, - endTime: endTimeSec, - queryString: [ - 'filter msg = "batch-processing-completed"', - `| stats count(*) as eventCount, pct(processingTimeMs, ${percentile}) as p`, - ].join("\n"), - }), - ); - - if (!queryId) return null; - - const deadline = Date.now() + INSIGHTS_QUERY_TIMEOUT_MS; - - while (Date.now() < deadline) { - await new Promise((resolve) => { - setTimeout(resolve, 2000); - }); - - const response = await client.send(new GetQueryResultsCommand({ queryId })); - - if (response.status === "Failed" || response.status === "Cancelled") { - return null; - } - - if (response.status === "Complete") { - const row = response.results?.[0]; - if (!row) return null; - - return { - count: Number(row.find((f) => f.field === "eventCount")?.value ?? 
0), - percentileMs: Number(row.find((f) => f.field === "p")?.value ?? 0), - }; - } - } - - return null; -} - -export async function waitForBatchProcessingPercentile( - client: CloudWatchLogsClient, - logGroupName: string, - testStartTime: number, - expectedCount: number, - percentile: number, -): Promise<{ count: number; percentileMs: number }> { - const startTimeSec = Math.floor(testStartTime / 1000); - let result = { count: 0, percentileMs: 0 }; - - await waitUntil( - async () => { - const endTimeSec = Math.floor((Date.now() + 60_000) / 1000); - const queryResult = await runInsightsQuery( - client, - logGroupName, - startTimeSec, - endTimeSec, - percentile, - ); - - if (!queryResult) return false; - - result = queryResult; - return result.count >= expectedCount; - }, - { - timeout: INSIGHTS_COLLECT_TIMEOUT_MS, - intervalBetweenAttempts: POLL_INTERVAL_MS, - }, - ); - - return result; -} diff --git a/tests/performance/helpers/deployment.ts b/tests/performance/helpers/deployment.ts deleted file mode 100644 index 5d6ee82e..00000000 --- a/tests/performance/helpers/deployment.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { - type DeploymentDetails, - buildLambdaLogGroupName, -} from "@nhs-notify-client-callbacks/test-support/helpers/deployment"; - -export function buildTransformFilterLambdaLogGroupName( - details: DeploymentDetails, -): string { - return buildLambdaLogGroupName(details, "client-transform-filter"); -} diff --git a/tests/performance/helpers/index.ts b/tests/performance/helpers/index.ts deleted file mode 100644 index 194022a3..00000000 --- a/tests/performance/helpers/index.ts +++ /dev/null @@ -1,4 +0,0 @@ -export * from "./cloudwatch"; -export * from "./deployment"; -export * from "./event-factories"; -export * from "./sqs"; diff --git a/tests/performance/helpers/sqs.ts b/tests/performance/helpers/sqs.ts deleted file mode 100644 index e8d5b171..00000000 --- a/tests/performance/helpers/sqs.ts +++ /dev/null @@ -1,72 +0,0 @@ -import { - SQSClient, - 
SendMessageBatchCommand, - SendMessageCommand, -} from "@aws-sdk/client-sqs"; -import type { StatusPublishEvent } from "@nhs-notify-client-callbacks/models"; - -export async function sendSqsEvent( - client: SQSClient, - queueUrl: string, - event: StatusPublishEvent, -): Promise { - await client.send( - new SendMessageCommand({ - QueueUrl: queueUrl, - MessageBody: JSON.stringify(event), - }), - ); -} - -const SQS_MAX_BATCH_SIZE = 10; - -export async function sendSqsBatch( - client: SQSClient, - queueUrl: string, - events: StatusPublishEvent[], -): Promise { - await client.send( - new SendMessageBatchCommand({ - QueueUrl: queueUrl, - Entries: events.map((event, index) => ({ - Id: String(index), - MessageBody: JSON.stringify(event), - })), - }), - ); -} - -export async function generateSqsLoad( - client: SQSClient, - queueUrl: string, - targetEventsPerSecond: number, - durationSeconds: number, - eventFactory: () => StatusPublishEvent, -): Promise<{ sent: number; durationMs: number }> { - const batchesPerSecond = Math.ceil( - targetEventsPerSecond / SQS_MAX_BATCH_SIZE, - ); - const start = Date.now(); - let sent = 0; - - for (let second = 0; second < durationSeconds; second++) { - const waveStart = Date.now(); - - const results = await Promise.all( - Array.from({ length: batchesPerSecond }, () => { - const batch = Array.from({ length: SQS_MAX_BATCH_SIZE }, eventFactory); - return sendSqsBatch(client, queueUrl, batch).then(() => batch.length); - }), - ); - sent += results.reduce((sum, count) => sum + count, 0); - - const remaining = 1000 - (Date.now() - waveStart); - if (remaining > 0 && second < durationSeconds - 1) { - await new Promise((resolve) => { - setTimeout(resolve, remaining); - }); - } - } - - return { sent, durationMs: Date.now() - start }; -} diff --git a/tests/performance/jest.config.ts b/tests/performance/jest.config.ts deleted file mode 100644 index 06f45e6d..00000000 --- a/tests/performance/jest.config.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { 
nodeJestConfig } from "../../jest.config.base.ts"; - -export default { - ...nodeJestConfig, - modulePaths: [""], - collectCoverage: false, - moduleNameMapper: { - "^helpers$": "/helpers/index", - }, - // Run performance tests serially to avoid queue contention - maxWorkers: 1, - // Force exit after tests complete — real AWS SDK clients keep connections alive - forceExit: true, -}; diff --git a/tests/performance/lambda-throughput.test.ts b/tests/performance/lambda-throughput.test.ts deleted file mode 100644 index 5a543ab6..00000000 --- a/tests/performance/lambda-throughput.test.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { CloudWatchLogsClient } from "@aws-sdk/client-cloudwatch-logs"; -import { SQSClient } from "@aws-sdk/client-sqs"; -import { - buildInboundEventQueueUrl, - createCloudWatchLogsClient, - createSqsClient, - getDeploymentDetails, -} from "@nhs-notify-client-callbacks/test-support/helpers"; -import { - buildTransformFilterLambdaLogGroupName, - createMessageStatusPublishEvent, - generateSqsLoad, - waitForBatchProcessingPercentile, -} from "helpers"; - -const TARGET_EPS = 3000; -const LOAD_DURATION_SECONDS = 30; -const P95_LATENCY_THRESHOLD_MS = 500; - -describe("Lambda throughput and latency under load", () => { - let sqsClient: SQSClient; - let cloudWatchClient: CloudWatchLogsClient; - let inboundQueueUrl: string; - let lambdaLogGroupName: string; - - beforeAll(() => { - const deploymentDetails = getDeploymentDetails(); - - sqsClient = createSqsClient(deploymentDetails); - cloudWatchClient = createCloudWatchLogsClient(deploymentDetails); - inboundQueueUrl = buildInboundEventQueueUrl(deploymentDetails); - lambdaLogGroupName = - buildTransformFilterLambdaLogGroupName(deploymentDetails); - }); - - afterAll(() => { - sqsClient.destroy(); - cloudWatchClient.destroy(); - }); - - it(`should sustain ~${TARGET_EPS} events/s for ${LOAD_DURATION_SECONDS}s with p95 Lambda processing time below ${P95_LATENCY_THRESHOLD_MS}ms`, async () => { - const testStartTime = 
Date.now(); - - const { durationMs, sent } = await generateSqsLoad( - sqsClient, - inboundQueueUrl, - TARGET_EPS, - LOAD_DURATION_SECONDS, - createMessageStatusPublishEvent, - ); - - const achievedEps = Math.round(sent / (durationMs / 1000)); - console.log( - `Load generation: ${sent} events in ${durationMs}ms (${achievedEps} eps achieved)`, - ); - - // Accept ≥90% of sent events processed — accounts for any events routed to DLQ - // due to transient Lambda errors under concurrency pressure. - const minExpectedCount = Math.floor(sent * 0.9); - - const { count, percentileMs } = await waitForBatchProcessingPercentile( - cloudWatchClient, - lambdaLogGroupName, - testStartTime, - minExpectedCount, - 95, - ); - - console.log( - `Processing: ${count} events logged, p95 Lambda processing time: ${percentileMs}ms`, - ); - - expect(count).toBeGreaterThanOrEqual(minExpectedCount); - expect(percentileMs).toBeLessThan(P95_LATENCY_THRESHOLD_MS); - }, 600_000); -}); diff --git a/tools/client-subscriptions-management/README.md b/tools/client-subscriptions-management/README.md index 7593bb07..9bef106b 100644 --- a/tools/client-subscriptions-management/README.md +++ b/tools/client-subscriptions-management/README.md @@ -7,7 +7,7 @@ TypeScript CLI utility for managing NHS Notify client subscription configuration From the repository root run: ```bash -npm --workspace tools/client-subscriptions-management run -- [options] +pnpm --filter client-subscriptions-management run -- [options] ``` ## Example @@ -15,7 +15,7 @@ npm --workspace tools/client-subscriptions-management run -- [options] Deploy a message status subscription to the `dev` environment using a named AWS profile: ```bash -npm --workspace tools/client-subscriptions-management run deploy -- message \ +pnpm --filter client-subscriptions-management run deploy -- message \ --environment dev \ --profile my-profile \ --client-id my-client \ @@ -33,7 +33,7 @@ npm --workspace tools/client-subscriptions-management run deploy -- message 
\ #### Message status ```bash -npm --workspace tools/client-subscriptions-management run deploy -- message \ +pnpm --filter client-subscriptions-management run deploy -- message \ --environment dev \ --client-id client-123 \ --message-statuses DELIVERED FAILED \ @@ -47,7 +47,7 @@ npm --workspace tools/client-subscriptions-management run deploy -- message \ #### Channel status ```bash -npm --workspace tools/client-subscriptions-management run deploy -- channel \ +pnpm --filter client-subscriptions-management run deploy -- channel \ --environment dev \ --client-id client-123 \ --channel-type EMAIL \ @@ -67,7 +67,7 @@ Optional for both: `--client-name "Test Client"` (defaults to client-id if not p ### Get Client Subscriptions By Client ID ```bash -npm --workspace tools/client-subscriptions-management run get-by-client-id -- \ +pnpm --filter client-subscriptions-management run get-by-client-id -- \ --environment dev \ --client-id client-123 ``` @@ -75,7 +75,7 @@ npm --workspace tools/client-subscriptions-management run get-by-client-id -- \ ### Put Message Status Subscription (S3 upload only) ```bash -npm --workspace tools/client-subscriptions-management run put-message-status -- \ +pnpm --filter client-subscriptions-management run put-message-status -- \ --environment dev \ --client-id client-123 \ --message-statuses DELIVERED FAILED \ @@ -91,7 +91,7 @@ Optional: `--client-name "Test Client"` (defaults to client-id if not provided), ### Put Channel Status Subscription (S3 upload only) ```bash -npm --workspace tools/client-subscriptions-management run put-channel-status -- \ +pnpm --filter client-subscriptions-management run put-channel-status -- \ --environment dev \ --client-id client-123 \ --channel-type EMAIL \ diff --git a/tools/client-subscriptions-management/package.json b/tools/client-subscriptions-management/package.json index ec4cb3b3..4d934470 100644 --- a/tools/client-subscriptions-management/package.json +++ 
b/tools/client-subscriptions-management/package.json @@ -29,6 +29,7 @@ "@aws-sdk/client-sts": "catalog:aws", "@aws-sdk/credential-providers": "catalog:aws", "@nhs-notify-client-callbacks/models": "workspace:*", + "picocolors": "catalog:app", "table": "catalog:app", "yargs": "catalog:app", "zod": "catalog:app" diff --git a/tools/client-subscriptions-management/src/__tests__/domain/client-subscription-builder.test.ts b/tools/client-subscriptions-management/src/__tests__/domain/client-subscription-builder.test.ts index 10fcb111..edc4b857 100644 --- a/tools/client-subscriptions-management/src/__tests__/domain/client-subscription-builder.test.ts +++ b/tools/client-subscriptions-management/src/__tests__/domain/client-subscription-builder.test.ts @@ -8,6 +8,16 @@ const UUID_REGEX = /^[\da-f]{8}-[\da-f]{4}-4[\da-f]{3}-[89ab][\da-f]{3}-[\da-f]{12}$/i; describe("buildTarget", () => { + let warnSpy: jest.SpyInstance; + + beforeEach(() => { + warnSpy = jest.spyOn(console, "warn").mockImplementation(); + }); + + afterEach(() => { + warnSpy.mockRestore(); + }); + it("builds a target with required fields", () => { const result = buildTarget({ apiEndpoint: "https://example.com/webhook", @@ -22,6 +32,9 @@ describe("buildTarget", () => { invocationMethod: "POST", invocationRateLimit: 10, apiKey: { headerName: "x-api-key", headerValue: "secret" }, + delivery: { + mtls: { enabled: false, certPinning: { enabled: false } }, + }, }); expect(result.targetId).toMatch(UUID_REGEX); }); @@ -35,8 +48,122 @@ describe("buildTarget", () => { expect(result.apiKey.headerName).toBe("x-api-key"); }); -}); + it("emits warning when mtls is disabled", () => { + buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + mtls: { enabled: false }, + }); + + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("mTLS is disabled"), + ); + }); + + it("emits warning when mtls enabled but certPinning disabled", () => { + buildTarget({ + apiEndpoint: 
"https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + mtls: { enabled: true }, + certPinning: { enabled: false }, + }); + + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("certificate pinning is disabled"), + ); + }); + + it("throws when certPinning enabled without spkiHash", () => { + expect(() => + buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + mtls: { enabled: true }, + certPinning: { enabled: true }, + }), + ).toThrow("Certificate pinning cannot be enabled without an SPKI hash"); + }); + + it("emits warning when certPinning enabled but mtls disabled", () => { + buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + mtls: { enabled: false }, + certPinning: { + enabled: true, + spkiHash: "dGVzdGhhc2g9PT09PT09PT09PT09PT09PT09PT09PQ==", + }, + }); + + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("mTLS is disabled"), + ); + }); + + it("emits no warnings for fully secure config", () => { + buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + mtls: { enabled: true }, + certPinning: { enabled: true, spkiHash: "abc123" }, + }); + + expect(warnSpy).not.toHaveBeenCalled(); + }); + + it("emits warning when maxRetryDurationSeconds is below 60", () => { + buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + maxRetryDurationSeconds: 30, + }); + + expect(warnSpy).toHaveBeenCalledWith( + expect.stringContaining("maxRetryDurationSeconds is 30s"), + ); + }); + + it("does not emit warning when maxRetryDurationSeconds is 60 or above", () => { + buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + maxRetryDurationSeconds: 60, + mtls: { enabled: true }, + certPinning: { enabled: true, spkiHash: "abc123" }, + }); + + expect(warnSpy).not.toHaveBeenCalled(); + }); + + it("includes maxRetryDurationSeconds 
in delivery when provided", () => { + const result = buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + maxRetryDurationSeconds: 3600, + }); + + expect(result.delivery?.maxRetryDurationSeconds).toBe(3600); + }); + + it("omits maxRetryDurationSeconds from delivery when not provided", () => { + const result = buildTarget({ + apiEndpoint: "https://example.com/webhook", + apiKey: "secret", + rateLimit: 10, + }); + + expect(result.delivery).not.toHaveProperty("maxRetryDurationSeconds"); + }); +}); describe("buildMessageStatusSubscription", () => { it("builds message status subscription", () => { const result = buildMessageStatusSubscription({ diff --git a/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-certificate.test.ts b/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-certificate.test.ts new file mode 100644 index 00000000..a902ed0c --- /dev/null +++ b/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-certificate.test.ts @@ -0,0 +1,131 @@ +import path from "node:path"; +import { mkdtempSync, unlinkSync, writeFileSync } from "node:fs"; +import os from "node:os"; +import * as cli from "src/entrypoint/cli/targets-set-certificate"; +import { + captureCliConsoleState, + expectWrappedCliError, + getMockCreateRepository, + resetCliConsoleState, + resetMockCreateRepository, + restoreCliConsoleState, +} from "src/__tests__/entrypoint/cli/test-utils"; +import { + createClientSubscriptionConfig, + createTarget, +} from "src/__tests__/helpers/client-subscription-fixtures"; + +const mockGetClientConfig = jest.fn(); +const mockPutClientConfig = jest.fn(); +const mockFormatClientConfig = jest.fn(); + +jest.mock("src/entrypoint/cli/helper", () => ({ + ...jest.requireActual("src/entrypoint/cli/helper"), + createRepository: jest.fn(), +})); +jest.mock("src/format", () => ({ + formatClientConfig: (...args: unknown[]) => 
mockFormatClientConfig(...args), +})); + +const FIXTURE_CERT_PATH = path.join(__dirname, "../../fixtures/test-cert.pem"); +const EXPECTED_SPKI_HASH = "SpGTft7LNMxLIx5s9GMAaHTo1uz4eqMtrAFws3Exs8I="; + +const target = createTarget(); +const config = createClientSubscriptionConfig({ targets: [target] }); +const mockCreateRepository = getMockCreateRepository(); + +describe("targets-set-certificate CLI", () => { + const originalCliConsoleState = captureCliConsoleState(); + + const baseArgs = [ + "node", + "script", + "--client-id", + "client-1", + "--bucket-name", + "bucket-1", + "--target-id", + target.targetId, + ]; + + beforeEach(() => { + mockGetClientConfig.mockReset(); + mockGetClientConfig.mockResolvedValue(config); + mockPutClientConfig.mockReset(); + mockPutClientConfig.mockResolvedValue(config); + mockFormatClientConfig.mockReset(); + mockFormatClientConfig.mockReturnValue("formatted-output"); + resetMockCreateRepository({ + getClientConfig: mockGetClientConfig, + putClientConfig: mockPutClientConfig, + }); + resetCliConsoleState(); + }); + + afterAll(() => { + restoreCliConsoleState(originalCliConsoleState); + }); + + it("extracts SPKI hash from valid PEM and stores it", async () => { + await cli.main([...baseArgs, "--pem-file", FIXTURE_CERT_PATH]); + + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.objectContaining({ + targets: [ + expect.objectContaining({ + delivery: expect.objectContaining({ + mtls: expect.objectContaining({ + certPinning: expect.objectContaining({ + spkiHash: EXPECTED_SPKI_HASH, + }), + }), + }), + }), + ], + }), + false, + ); + }); + + it("errors for invalid PEM file", async () => { + const tmpDir = mkdtempSync(path.join(os.tmpdir(), "cert-test-")); + const invalidPath = path.join(tmpDir, "invalid.pem"); + // eslint-disable-next-line security/detect-non-literal-fs-filename + writeFileSync(invalidPath, "not-a-pem"); + + await cli.main([...baseArgs, "--pem-file", invalidPath]).catch(() => {}); + + 
expect(mockPutClientConfig).not.toHaveBeenCalled(); + + // eslint-disable-next-line security/detect-non-literal-fs-filename + unlinkSync(invalidPath); + }); + + it("passes dry-run to putClientConfig", async () => { + await cli.main([ + ...baseArgs, + "--pem-file", + FIXTURE_CERT_PATH, + "--dry-run", + "true", + ]); + + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.any(Object), + true, + ); + }); + + it("handles repository errors in wrapped CLI", async () => { + expect.hasAssertions(); + mockCreateRepository.mockRejectedValue(new Error("Boom")); + + await expectWrappedCliError(cli.main, [ + ...baseArgs, + "--pem-file", + FIXTURE_CERT_PATH, + ]); + }); +}); diff --git a/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-mtls.test.ts b/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-mtls.test.ts new file mode 100644 index 00000000..a0e53092 --- /dev/null +++ b/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-mtls.test.ts @@ -0,0 +1,119 @@ +import * as cli from "src/entrypoint/cli/targets-set-mtls"; +import { + captureCliConsoleState, + expectWrappedCliError, + getMockCreateRepository, + resetCliConsoleState, + resetMockCreateRepository, + restoreCliConsoleState, +} from "src/__tests__/entrypoint/cli/test-utils"; +import { + createClientSubscriptionConfig, + createTarget, +} from "src/__tests__/helpers/client-subscription-fixtures"; + +const mockGetClientConfig = jest.fn(); +const mockPutClientConfig = jest.fn(); +const mockFormatClientConfig = jest.fn(); + +jest.mock("src/entrypoint/cli/helper", () => ({ + ...jest.requireActual("src/entrypoint/cli/helper"), + createRepository: jest.fn(), +})); +jest.mock("src/format", () => ({ + formatClientConfig: (...args: unknown[]) => mockFormatClientConfig(...args), +})); + +const target = createTarget(); +const config = createClientSubscriptionConfig({ targets: [target] }); +const mockCreateRepository = 
getMockCreateRepository(); + +describe("targets-set-mtls CLI", () => { + const originalCliConsoleState = captureCliConsoleState(); + + const baseArgs = [ + "node", + "script", + "--client-id", + "client-1", + "--bucket-name", + "bucket-1", + "--target-id", + target.targetId, + ]; + + beforeEach(() => { + mockGetClientConfig.mockReset(); + mockGetClientConfig.mockResolvedValue(config); + mockPutClientConfig.mockReset(); + mockPutClientConfig.mockResolvedValue(config); + mockFormatClientConfig.mockReset(); + mockFormatClientConfig.mockReturnValue("formatted-output"); + resetMockCreateRepository({ + getClientConfig: mockGetClientConfig, + putClientConfig: mockPutClientConfig, + }); + resetCliConsoleState(); + console.warn = jest.fn(); + }); + + afterAll(() => { + restoreCliConsoleState(originalCliConsoleState); + }); + + it("enables mTLS with --enable flag", async () => { + await cli.main([...baseArgs, "--enable"]); + + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.objectContaining({ + targets: [ + expect.objectContaining({ + delivery: expect.objectContaining({ + mtls: expect.objectContaining({ enabled: true }), + }), + }), + ], + }), + false, + ); + }); + + it("disables mTLS with --no-enable flag and emits ANSI warning", async () => { + await cli.main([...baseArgs, "--no-enable"]); + + expect(console.warn).toHaveBeenCalledWith( + expect.stringContaining("Disabling mTLS"), + ); + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.objectContaining({ + targets: [ + expect.objectContaining({ + delivery: expect.objectContaining({ + mtls: expect.objectContaining({ enabled: false }), + }), + }), + ], + }), + false, + ); + }); + + it("passes dry-run to putClientConfig", async () => { + await cli.main([...baseArgs, "--enable", "--dry-run", "true"]); + + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.any(Object), + true, + ); + }); + + it("handles errors in wrapped CLI", async () => { + 
expect.hasAssertions(); + mockCreateRepository.mockRejectedValue(new Error("Boom")); + + await expectWrappedCliError(cli.main, [...baseArgs, "--enable"]); + }); +}); diff --git a/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-pinning.test.ts b/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-pinning.test.ts new file mode 100644 index 00000000..051dcbdd --- /dev/null +++ b/tools/client-subscriptions-management/src/__tests__/entrypoint/cli/targets-set-pinning.test.ts @@ -0,0 +1,177 @@ +import * as cli from "src/entrypoint/cli/targets-set-pinning"; +import { + captureCliConsoleState, + expectWrappedCliError, + getMockCreateRepository, + resetCliConsoleState, + resetMockCreateRepository, + restoreCliConsoleState, +} from "src/__tests__/entrypoint/cli/test-utils"; +import { + createClientSubscriptionConfig, + createTarget, +} from "src/__tests__/helpers/client-subscription-fixtures"; + +const mockGetClientConfig = jest.fn(); +const mockPutClientConfig = jest.fn(); +const mockFormatClientConfig = jest.fn(); + +jest.mock("src/entrypoint/cli/helper", () => ({ + ...jest.requireActual("src/entrypoint/cli/helper"), + createRepository: jest.fn(), +})); +jest.mock("src/format", () => ({ + formatClientConfig: (...args: unknown[]) => mockFormatClientConfig(...args), +})); + +const target = createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: "existing-hash" }, + }, + }, +}); +const config = createClientSubscriptionConfig({ targets: [target] }); +const mockCreateRepository = getMockCreateRepository(); + +describe("targets-set-pinning CLI", () => { + const originalCliConsoleState = captureCliConsoleState(); + + const baseArgs = [ + "node", + "script", + "--client-id", + "client-1", + "--bucket-name", + "bucket-1", + "--target-id", + target.targetId, + ]; + + beforeEach(() => { + mockGetClientConfig.mockReset(); + mockGetClientConfig.mockResolvedValue( + 
createClientSubscriptionConfig({ + targets: [ + createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: true, spkiHash: "existing-hash" }, + }, + }, + }), + ], + }), + ); + mockPutClientConfig.mockReset(); + mockPutClientConfig.mockResolvedValue(config); + mockFormatClientConfig.mockReset(); + mockFormatClientConfig.mockReturnValue("formatted-output"); + resetMockCreateRepository({ + getClientConfig: mockGetClientConfig, + putClientConfig: mockPutClientConfig, + }); + resetCliConsoleState(); + console.warn = jest.fn(); + }); + + afterAll(() => { + restoreCliConsoleState(originalCliConsoleState); + }); + + it("enables certificate pinning with --enable flag", async () => { + await cli.main([...baseArgs, "--enable"]); + + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.objectContaining({ + targets: [ + expect.objectContaining({ + delivery: expect.objectContaining({ + mtls: expect.objectContaining({ + certPinning: { enabled: true, spkiHash: "existing-hash" }, + }), + }), + }), + ], + }), + false, + ); + }); + + it("disables pinning with --no-enable flag and emits ANSI warning", async () => { + await cli.main([...baseArgs, "--no-enable"]); + + expect(console.warn).toHaveBeenCalledWith( + expect.stringContaining("Disabling certificate pinning"), + ); + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.objectContaining({ + targets: [ + expect.objectContaining({ + delivery: expect.objectContaining({ + mtls: expect.objectContaining({ + certPinning: { enabled: false, spkiHash: "existing-hash" }, + }), + }), + }), + ], + }), + false, + ); + }); + + it("preserves existing spkiHash when disabling", async () => { + await cli.main([...baseArgs, "--no-enable"]); + + const putCall = mockPutClientConfig.mock.calls[0]; + const updatedTarget = putCall[1].targets[0]; + expect(updatedTarget.delivery.mtls.certPinning.spkiHash).toBe( + "existing-hash", + ); + }); + + it("passes dry-run to putClientConfig", async () 
=> { + await cli.main([...baseArgs, "--enable", "--dry-run", "true"]); + + expect(mockPutClientConfig).toHaveBeenCalledWith( + "client-1", + expect.any(Object), + true, + ); + }); + + it("handles errors in wrapped CLI", async () => { + expect.hasAssertions(); + mockCreateRepository.mockRejectedValue(new Error("Boom")); + + await expectWrappedCliError(cli.main, [...baseArgs, "--enable"]); + }); + + it("throws when enabling pinning but target has no spkiHash", async () => { + expect.hasAssertions(); + mockGetClientConfig.mockResolvedValue( + createClientSubscriptionConfig({ + targets: [ + createTarget({ + delivery: { + mtls: { + enabled: true, + certPinning: { enabled: false }, + }, + }, + }), + ], + }), + ); + + await expectWrappedCliError( + cli.main, + [...baseArgs, "--enable"], + `Target '${target.targetId}' has no SPKI hash stored. Run 'targets-set-certificate' first to configure a certificate hash before enabling pinning.`, + ); + }); +}); diff --git a/tools/client-subscriptions-management/src/__tests__/fixtures/test-cert.pem b/tools/client-subscriptions-management/src/__tests__/fixtures/test-cert.pem new file mode 100644 index 00000000..66accebb --- /dev/null +++ b/tools/client-subscriptions-management/src/__tests__/fixtures/test-cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICETCCAbgCCQD0bFWfktPerzAKBggqhkjOPQQDAjAXMRUwEwYDVQQDDAx0ZXN0 +LWZpeHR1cmUwHhcNMjYwNDE3MDgzMjAzWhcNMzYwNDE0MDgzMjAzWjAXMRUwEwYD +VQQDDAx0ZXN0LWZpeHR1cmUwggFLMIIBAwYHKoZIzj0CATCB9wIBATAsBgcqhkjO +PQEBAiEA/////wAAAAEAAAAAAAAAAAAAAAD///////////////8wWwQg/////wAA +AAEAAAAAAAAAAAAAAAD///////////////wEIFrGNdiqOpPns+u9VXaYhrxlHQaw +zFOw9jvOPD4n0mBLAxUAxJ02CIbnBJNqZnjhE50mt4GffpAEQQRrF9Hy4SxCR/i8 +5uVjpEDydwN9gS3rM6D0oTlF2JjClk/jQuL+Gn+bjufrSnwPnhYrzjNXazFezsu2 +QGg3v1H1AiEA/////wAAAAD//////////7zm+q2nF56E87nKwvxjJVECAQEDQgAE +WpOtSSCENuhBWnPFy4csFZkUT/t77xiQxJr/mrClSsNs4H7vwgXRc5OCT+BuTruT +J/kv6ipp/9s/c5/WP8Ln9zAKBggqhkjOPQQDAgNHADBEAiA46dblj9UZZe163Me1 
+sydmzRkzLrtAy1pLCcPp86Z4WwIgRD6/Oa4UQ/C6tCDpdLquzAtRxSNKuHMPLXK9 +vkFt930= +-----END CERTIFICATE----- diff --git a/tools/client-subscriptions-management/src/domain/client-subscription-builder.ts b/tools/client-subscriptions-management/src/domain/client-subscription-builder.ts index f91ee5a4..514c48d7 100644 --- a/tools/client-subscriptions-management/src/domain/client-subscription-builder.ts +++ b/tools/client-subscriptions-management/src/domain/client-subscription-builder.ts @@ -7,12 +7,16 @@ import type { MessageStatusSubscriptionConfiguration, SupplierStatus, } from "@nhs-notify-client-callbacks/models"; +import pc from "picocolors"; export type BuildTargetArgs = { apiEndpoint: string; apiKey: string; apiKeyHeaderName?: string; rateLimit: number; + maxRetryDurationSeconds?: number; + mtls?: { enabled: boolean }; + certPinning?: { enabled: boolean; spkiHash?: string }; }; export type BuildMessageStatusSubscriptionArgs = { @@ -30,6 +34,42 @@ export type BuildChannelStatusSubscriptionArgs = { }; export function buildTarget(args: BuildTargetArgs): CallbackTarget { + const mtls = args.mtls ?? { enabled: false }; + const certPinning = args.certPinning ?? { enabled: false }; + + const warnings: string[] = []; + + if (!mtls.enabled) { + warnings.push("mTLS is disabled — callbacks will not use mutual TLS"); + } + + if (mtls.enabled && !certPinning.enabled) { + warnings.push("mTLS is enabled but certificate pinning is disabled"); + } + + if (certPinning.enabled && !certPinning.spkiHash) { + throw new Error( + "Certificate pinning cannot be enabled without an SPKI hash. 
Run 'targets-set-certificate' first.", + ); + } + + if (!mtls.enabled && certPinning.enabled) { + warnings.push("Certificate pinning is enabled but mTLS is disabled"); + } + + if ( + args.maxRetryDurationSeconds !== undefined && + args.maxRetryDurationSeconds < 60 + ) { + warnings.push( + `maxRetryDurationSeconds is ${args.maxRetryDurationSeconds}s — values below 60s may exhaust the retry window before a single delivery attempt completes`, + ); + } + + for (const warning of warnings) { + console.warn(pc.bold(pc.red(`WARNING: ${warning}`))); + } + return { targetId: crypto.randomUUID(), type: "API", @@ -40,6 +80,15 @@ export function buildTarget(args: BuildTargetArgs): CallbackTarget { headerName: args.apiKeyHeaderName ?? "x-api-key", headerValue: args.apiKey, }, + delivery: { + ...(args.maxRetryDurationSeconds !== undefined && { + maxRetryDurationSeconds: args.maxRetryDurationSeconds, + }), + mtls: { + ...mtls, + certPinning, + }, + }, }; } diff --git a/tools/client-subscriptions-management/src/entrypoint/cli/clients-put.ts b/tools/client-subscriptions-management/src/entrypoint/cli/clients-put.ts index c0d13554..ce3d1cca 100644 --- a/tools/client-subscriptions-management/src/entrypoint/cli/clients-put.ts +++ b/tools/client-subscriptions-management/src/entrypoint/cli/clients-put.ts @@ -56,8 +56,7 @@ export const handler: CliCommand["handler"] = async (argv) => { return; } - // Safe as this is an internal tool and this CLI option we are expecting the user will run locally and manually - // eslint-disable-next-line security/detect-non-literal-fs-filename + // eslint-disable-next-line security/detect-non-literal-fs-filename -- path is provided directly by the operator via CLI arg const rawJson = argv.json ?? 
readFileSync(argv.file!, "utf8"); let parsed: unknown; diff --git a/tools/client-subscriptions-management/src/entrypoint/cli/helper.ts b/tools/client-subscriptions-management/src/entrypoint/cli/helper.ts index 14e998dd..23070926 100644 --- a/tools/client-subscriptions-management/src/entrypoint/cli/helper.ts +++ b/tools/client-subscriptions-management/src/entrypoint/cli/helper.ts @@ -1,3 +1,7 @@ +import type { + CallbackTarget, + ClientSubscriptionConfiguration, +} from "@nhs-notify-client-callbacks/models"; import { createRepository as createRepositoryFromOptions, createSsmApplicationsMapRepository as createSsmApplicationsMapRepositoryFromOptions, @@ -124,6 +128,14 @@ export const clientIdOption = { }, }; +export const targetIdOption = { + "target-id": { + type: "string" as const, + demandOption: true as const, + description: "Target identifier", + }, +}; + export const writeOptions = { "dry-run": { type: "boolean" as const, @@ -159,3 +171,30 @@ export const createSsmApplicationsMapRepository = (argv: SsmCliArgs) => { profile, }); }; + +export async function requireClientConfig( + repository: { + getClientConfig: ( + clientId: string, + ) => Promise; + }, + clientId: string, +): Promise { + const config = await repository.getClientConfig(clientId); + if (!config) { + throw new Error(`No configuration found for client: ${clientId}`); + } + return config; +} + +export function requireTargetConfig( + config: ClientSubscriptionConfiguration, + clientId: string, + targetId: string, +): CallbackTarget { + const target = config.targets.find((t) => t.targetId === targetId); + if (!target) { + throw new Error(`Target '${targetId}' not found for client '${clientId}'`); + } + return target; +} diff --git a/tools/client-subscriptions-management/src/entrypoint/cli/targets-del.ts b/tools/client-subscriptions-management/src/entrypoint/cli/targets-del.ts index 6fe56ac2..7f2c3e19 100644 --- a/tools/client-subscriptions-management/src/entrypoint/cli/targets-del.ts +++ 
b/tools/client-subscriptions-management/src/entrypoint/cli/targets-del.ts @@ -7,6 +7,7 @@ import { commonOptions, createRepository, runCommand, + targetIdOption, writeOptions, } from "src/entrypoint/cli/helper"; import { formatClientConfig } from "src/format"; @@ -20,12 +21,8 @@ export const builder = (yargs: Argv) => yargs.options({ ...commonOptions, ...clientIdOption, + ...targetIdOption, ...writeOptions, - "target-id": { - type: "string", - demandOption: true, - description: "Target identifier to delete", - }, }); export const handler: CliCommand["handler"] = async (argv) => { diff --git a/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-certificate.ts b/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-certificate.ts new file mode 100644 index 00000000..857d2991 --- /dev/null +++ b/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-certificate.ts @@ -0,0 +1,94 @@ +import { X509Certificate, createHash } from "node:crypto"; +import { readFileSync } from "node:fs"; +import type { Argv } from "yargs"; +import { + type CliCommand, + type ClientCliArgs, + type WriteCliArgs, + clientIdOption, + commonOptions, + createRepository, + requireClientConfig, + requireTargetConfig, + runCommand, + targetIdOption, + writeOptions, +} from "src/entrypoint/cli/helper"; +import { formatClientConfig } from "src/format"; + +type TargetsSetCertificateArgs = ClientCliArgs & + WriteCliArgs & { + "target-id": string; + "pem-file": string; + }; + +export const builder = (yargs: Argv) => + yargs.options({ + ...commonOptions, + ...clientIdOption, + ...targetIdOption, + ...writeOptions, + "pem-file": { + type: "string", + demandOption: true, + description: "Path to PEM certificate file", + }, + }); + +function extractSpkiHash(pemPath: string): string { + // eslint-disable-next-line security/detect-non-literal-fs-filename -- path is provided directly by the operator via CLI arg + const pemBuffer = readFileSync(pemPath); + const x509 = 
new X509Certificate(pemBuffer); + const spkiDer = x509.publicKey.export({ + type: "spki", + format: "der", + }) as Buffer; + return createHash("sha256").update(spkiDer).digest("base64"); +} + +export const handler: CliCommand["handler"] = async ( + argv, +) => { + const spkiHash = extractSpkiHash(argv["pem-file"]); + console.log(`Extracted SPKI hash: ${spkiHash}`); + + const repository = await createRepository(argv); + const config = await requireClientConfig(repository, argv["client-id"]); + const target = requireTargetConfig( + config, + argv["client-id"], + argv["target-id"], + ); + + const mtls = target.delivery?.mtls ?? { enabled: false }; + const certPinning = mtls.certPinning ?? { enabled: false }; + target.delivery = { + ...target.delivery, + mtls: { + ...mtls, + certPinning: { + ...certPinning, + spkiHash, + }, + }, + }; + + const result = await repository.putClientConfig( + argv["client-id"], + config, + argv["dry-run"], + ); + console.log("Certificate SPKI hash stored successfully"); + console.log(formatClientConfig(result)); +}; + +export const command: CliCommand = { + command: "targets-set-certificate", + describe: "Extract and store SPKI hash from a PEM certificate for a target", + builder, + handler, +}; + +export async function main(args: string[] = process.argv) { + await runCommand(command, args); +} diff --git a/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-mtls.ts b/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-mtls.ts new file mode 100644 index 00000000..ae9127ef --- /dev/null +++ b/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-mtls.ts @@ -0,0 +1,87 @@ +import type { Argv } from "yargs"; +import pc from "picocolors"; +import { + type CliCommand, + type ClientCliArgs, + type WriteCliArgs, + clientIdOption, + commonOptions, + createRepository, + requireClientConfig, + requireTargetConfig, + runCommand, + targetIdOption, + writeOptions, +} from "src/entrypoint/cli/helper"; 
+import { formatClientConfig } from "src/format"; + +type TargetsSetMtlsArgs = ClientCliArgs & + WriteCliArgs & { + "target-id": string; + enable: boolean; + }; + +export const builder = (yargs: Argv) => + yargs.options({ + ...commonOptions, + ...clientIdOption, + ...targetIdOption, + ...writeOptions, + enable: { + type: "boolean", + demandOption: true, + description: + "Enable or disable mTLS for this target (use --no-enable to disable)", + }, + }); + +export const handler: CliCommand["handler"] = async ( + argv, +) => { + const enabled = argv.enable; + + if (!enabled) { + console.warn( + pc.bold( + pc.red("WARNING: Disabling mTLS — callbacks will not use mutual TLS"), + ), + ); + } + + const repository = await createRepository(argv); + const config = await requireClientConfig(repository, argv["client-id"]); + const target = requireTargetConfig( + config, + argv["client-id"], + argv["target-id"], + ); + + target.delivery = { + ...target.delivery, + mtls: { + ...target.delivery?.mtls, + enabled, + }, + }; + + const result = await repository.putClientConfig( + argv["client-id"], + config, + argv["dry-run"], + ); + console.log( + `mTLS ${enabled ? 
"enabled" : "disabled"} for target ${argv["target-id"]}`, + ); + console.log(formatClientConfig(result)); +}; + +export const command: CliCommand = { + command: "targets-set-mtls", + describe: "Enable or disable mTLS for a callback target", + builder, + handler, +}; + +export async function main(args: string[] = process.argv) { + await runCommand(command, args); +} diff --git a/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-pinning.ts b/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-pinning.ts new file mode 100644 index 00000000..6816d85f --- /dev/null +++ b/tools/client-subscriptions-management/src/entrypoint/cli/targets-set-pinning.ts @@ -0,0 +1,94 @@ +import type { Argv } from "yargs"; +import pc from "picocolors"; +import { + type CliCommand, + type ClientCliArgs, + type WriteCliArgs, + clientIdOption, + commonOptions, + createRepository, + requireClientConfig, + requireTargetConfig, + runCommand, + targetIdOption, + writeOptions, +} from "src/entrypoint/cli/helper"; +import { formatClientConfig } from "src/format"; + +type TargetsSetPinningArgs = ClientCliArgs & + WriteCliArgs & { + "target-id": string; + enable: boolean; + }; + +export const builder = (yargs: Argv) => + yargs.options({ + ...commonOptions, + ...clientIdOption, + ...targetIdOption, + ...writeOptions, + enable: { + type: "boolean", + demandOption: true, + description: + "Enable or disable certificate pinning for this target (use --no-enable to disable)", + }, + }); + +export const handler: CliCommand["handler"] = async ( + argv, +) => { + const enabled = argv.enable; + + if (!enabled) { + console.warn(pc.bold(pc.red("WARNING: Disabling certificate pinning"))); + } + + const repository = await createRepository(argv); + const config = await requireClientConfig(repository, argv["client-id"]); + const target = requireTargetConfig( + config, + argv["client-id"], + argv["target-id"], + ); + + if (enabled && !target.delivery?.mtls?.certPinning?.spkiHash) { + 
throw new Error( + `Target '${argv["target-id"]}' has no SPKI hash stored. Run 'targets-set-certificate' first to configure a certificate hash before enabling pinning.`, + ); + } + + const mtls = target.delivery?.mtls ?? { enabled: false }; + const certPinning = mtls.certPinning ?? { enabled: false }; + target.delivery = { + ...target.delivery, + mtls: { + ...mtls, + certPinning: { + ...certPinning, + enabled, + }, + }, + }; + + const result = await repository.putClientConfig( + argv["client-id"], + config, + argv["dry-run"], + ); + console.log( + `Certificate pinning ${enabled ? "enabled" : "disabled"} for target ${argv["target-id"]}`, + ); + console.log(formatClientConfig(result)); +}; + +export const command: CliCommand = { + command: "targets-set-pinning", + describe: "Enable or disable certificate pinning for a callback target", + builder, + handler, +}; + +export async function main(args: string[] = process.argv) { + await runCommand(command, args); +} diff --git a/tools/client-subscriptions-management/src/repository/client-subscriptions.ts b/tools/client-subscriptions-management/src/repository/client-subscriptions.ts index 4a744fc3..04fc266b 100644 --- a/tools/client-subscriptions-management/src/repository/client-subscriptions.ts +++ b/tools/client-subscriptions-management/src/repository/client-subscriptions.ts @@ -131,7 +131,7 @@ export class ClientSubscriptionRepository { const updated: ClientSubscriptionConfiguration = { ...config, subscriptions: config.subscriptions.map( - // eslint-disable-next-line sonarjs/function-return-type -- false positive: complex conditional spread returns are all SubscriptionConfiguration subtypes + // eslint-disable-next-line sonarjs/function-return-type (sub): SubscriptionConfiguration => { if (sub.subscriptionId !== subscriptionId) return sub; if (sub.subscriptionType === "MessageStatus") {