From f02fb2854a05cdcebf09ce7bae3f216643904668 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 27 Feb 2026 18:48:47 +0000 Subject: [PATCH 01/66] ELI-615 | campaign having recent - active start_date supersedes the others sharing same best-status --- .../calculators/eligibility_calculator.py | 4 +- .../in_process/test_eligibility_endpoint.py | 141 +++++++++++++++++- 2 files changed, 142 insertions(+), 3 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index b7314bdcc..2a071b2bc 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -124,7 +124,9 @@ def get_eligibility_status(self, include_actions: str, conditions: list[str], ca return eligibility_status.EligibilityStatus(conditions=final_result) def get_best_iteration_result(self, campaign_group: list[CampaignConfig]) -> BestIterationResult | None: - iteration_results = self.get_iteration_results(campaign_group) + sorted_campaigns = sorted(campaign_group, key=lambda c: c.start_date, reverse=True) + + iteration_results = self.get_iteration_results(sorted_campaigns) if not iteration_results: return None diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 28989d2be..e0cc9d9d5 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1,4 +1,5 @@ import json +from datetime import date, timedelta, datetime, timezone from http import HTTPStatus import pytest @@ -25,6 +26,7 @@ from tests.fixtures.builders.model import rule from tests.integration.conftest import UNIQUE_CONSUMER_HEADER +today = lambda: datetime.now(timezone.utc).date() class 
TestBaseLine: def test_nhs_number_given( @@ -1192,10 +1194,11 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no ( [ # Campaign configs in S3 + # Note: Configs are uploaded in order so the start date would be newer down the order. ("RSV", "RSV_campaign_id_1"), ("RSV", "RSV_campaign_id_2"), - ("RSV", "RSV_campaign_id_3"), ("RSV", "RSV_campaign_id_4"), + ("RSV", "RSV_campaign_id_3"), ("RSV", "inactive_RSV_campaign_id_5", "inactive"), # inactive iteration ("RSV", "RSV_campaign_id_6"), ], @@ -1223,7 +1226,7 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no ], indirect=["campaign_configs", "consumer_mappings"], ) - def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( # noqa : PLR0913 + def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( self, client: FlaskClient, persisted_person: NHSNumber, @@ -1379,3 +1382,137 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa ) ), ) + + @pytest.mark.parametrize( + ("campaign_1_start_date", "campaign_2_start_date", "postcode_for_comparator", "expected_campaign_id"), + [ + ( + ("RSV_campaign_id_1", today()), + ("RSV_campaign_id_2", today() - timedelta(days=1)), + "SW19", # postcode for resulting in not-actionable + "RSV_campaign_id_1", + ), + ( + ("RSV_campaign_id_1", today() - timedelta(days=1)), + ("RSV_campaign_id_2", today()), + "SW19", # postcode for resulting in not-actionable + "RSV_campaign_id_2", + ), + ( + ("RSV_campaign_id_1", today()), + ("RSV_campaign_id_2", today() - timedelta(days=1)), + "M4", # postcode for resulting in actionable + "RSV_campaign_id_1", + ), + ( + ("RSV_campaign_id_1", today() - timedelta(days=1)), + ("RSV_campaign_id_2", today()), + "M4", # postcode for resulting in actionable + "RSV_campaign_id_2", + ), + ], + ) + def 
test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( + self, + client: FlaskClient, + persisted_person_pc_sw19: NHSNumber, + s3_client: BaseClient, + consumer_mapping_bucket: BucketName, + rules_bucket: BucketName, + audit_bucket: BucketName, + secretsmanager_client: BaseClient, # noqa: ARG002 + campaign_1_start_date: tuple[str, date], + campaign_2_start_date: tuple[str, date], + postcode_for_comparator: str, + expected_campaign_id: NHSNumber, + ): + # Given + consumer_id = "consumer-n3bs-jo4hn-ce4na" + headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} + + # Consumer Mapping Data + s3_client.put_object( + Bucket=consumer_mapping_bucket, + Key="consumer_mapping_config.json", + Body=json.dumps( + { + consumer_id: [ + {"CampaignConfigID": "RSV_campaign_id_1"}, + {"CampaignConfigID": "RSV_campaign_id_2"}, + ], + } + ), + ContentType="application/json", + ) + + # Campaign configs + campaign_1 = rule.CampaignConfigFactory.build( + id=campaign_1_start_date[0], + target="RSV", + start_date=campaign_1_start_date[1], + type="V", + iterations=[ + rule.IterationFactory.build( + iteration_rules=[ + rule.PostcodeSuppressionRuleFactory.build( + name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) + ), + ], + iteration_cohorts=[ + rule.IterationCohortFactory.build( + cohort_label="cohort1", + cohort_group="cohort_group1", + positive_description="positive_description", + ) + ], + status_text=None, + ) + ], + ) + + campaign_2 = rule.CampaignConfigFactory.build( + id=campaign_2_start_date[0], + target="RSV", + type="V", + start_date=campaign_2_start_date[1], + iterations=[ + rule.IterationFactory.build( + iteration_rules=[ + rule.PostcodeSuppressionRuleFactory.build( + name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) + ), + ], + iteration_cohorts=[ + rule.IterationCohortFactory.build( + cohort_label="cohort1", + 
cohort_group="cohort_group1", + positive_description="positive_description", + ) + ], + status_text=None, + ) + ], + ) + + for campaign in [campaign_1, campaign_2]: + s3_client.put_object( + Bucket=rules_bucket, + Key=f"{campaign.id}.json", + Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), + ContentType="application/json", + ) + + # When + client.get(f"/patient-check/{persisted_person_pc_sw19}", headers=headers) + + objects = s3_client.list_objects_v2(Bucket=audit_bucket).get("Contents", []) + object_keys = [obj["Key"] for obj in objects] + latest_key = sorted(object_keys)[-1] + audit_data = json.loads(s3_client.get_object(Bucket=audit_bucket, Key=latest_key)["Body"].read()) + + # Then + if expected_campaign_id is not None: + assert_that(len(audit_data["response"]["condition"]), equal_to(1)) + assert_that(audit_data["response"]["condition"][0].get("campaignId"), equal_to(expected_campaign_id)) + else: + assert_that(len(audit_data["response"]["condition"]), equal_to(0)) From 582308a0e114fc2a0a7cddffa90d1d6cdab7bca4 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 27 Feb 2026 18:50:50 +0000 Subject: [PATCH 02/66] ELI-615 | more linting --- .../in_process/test_eligibility_endpoint.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index e0cc9d9d5..2ff477bb8 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1,5 +1,5 @@ import json -from datetime import date, timedelta, datetime, timezone +from datetime import UTC, date, datetime, timedelta from http import HTTPStatus import pytest @@ -26,7 +26,10 @@ from tests.fixtures.builders.model import rule from tests.integration.conftest import UNIQUE_CONSUMER_HEADER -today = lambda: 
datetime.now(timezone.utc).date() + +def today(): + return datetime.now(UTC).date() + class TestBaseLine: def test_nhs_number_given( @@ -1226,7 +1229,7 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no ], indirect=["campaign_configs", "consumer_mappings"], ) - def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( + def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( # noqa : PLR0913 self, client: FlaskClient, persisted_person: NHSNumber, @@ -1412,7 +1415,7 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa ), ], ) - def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( + def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( # noqa : PLR0913 self, client: FlaskClient, persisted_person_pc_sw19: NHSNumber, From f8d4987d286dd475faef2ddac9d71b85f6ce883b Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 2 Mar 2026 11:06:48 +0000 Subject: [PATCH 03/66] ELI-615 | revert commit --- .../services/calculators/eligibility_calculator.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 2a071b2bc..b7314bdcc 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -124,9 +124,7 @@ def get_eligibility_status(self, include_actions: str, conditions: list[str], ca return eligibility_status.EligibilityStatus(conditions=final_result) def get_best_iteration_result(self, campaign_group: list[CampaignConfig]) -> 
BestIterationResult | None: - sorted_campaigns = sorted(campaign_group, key=lambda c: c.start_date, reverse=True) - - iteration_results = self.get_iteration_results(sorted_campaigns) + iteration_results = self.get_iteration_results(campaign_group) if not iteration_results: return None From 039d79c6b10a204132ab5aea63d95726dda00c25 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:03:23 +0000 Subject: [PATCH 04/66] ELI-615 | wip --- .../calculators/eligibility_calculator.py | 6 +-- .../services/processors/campaign_evaluator.py | 38 +++++++++++++------ .../processors/test_campaign_evaluator.py | 16 ++++---- 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index b7314bdcc..ab37a0718 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -81,13 +81,13 @@ def get_the_best_cohort_memberships( return best_status, best_cohorts - def get_eligibility_status(self, include_actions: str, conditions: list[str], category: str) -> EligibilityStatus: + def get_eligibility_status(self, include_actions: str, conditions: list[str], requested_category: str) -> EligibilityStatus: include_actions_flag = include_actions.upper() == "Y" condition_results: dict[ConditionName, IterationResult] = {} final_result = [] - requested_grouped_campaigns = self.campaign_evaluator.get_requested_grouped_campaigns( - self.campaign_configs, conditions, category + requested_grouped_campaigns = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + self.campaign_configs, conditions, requested_category ) for condition_name, campaign_group in requested_grouped_campaigns: best_iteration_result = 
self.get_best_iteration_result(campaign_group) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 864d45c8c..2b4f7140a 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -15,29 +15,43 @@ class CampaignEvaluator: def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> list[CampaignConfig]: return [cc for cc in campaign_configs if cc.campaign_live] - def get_requested_grouped_campaigns( - self, campaign_configs: Collection[CampaignConfig], conditions: list[str], category: str - ) -> Iterator[tuple[eligibility_status.ConditionName, list[CampaignConfig]]]: + def get_latest_campaign(self, campaign_group: list[CampaignConfig]): + if not campaign_group: + return None + + latest_date = max(c.start_date for c in campaign_group) + + latest = [c for c in campaign_group if c.start_date == latest_date] + + if len(latest) == 1: + return latest[0] + + if len(latest) > 1: + raise ValueError( + f"Multiple campaigns share the latest start_date: {latest_date}") # TODO handle it in FHIR format + + return None + + def get_campaign_with_latest_active_iteration_per_target( + self, campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str + ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig]]: mapping = { "ALL": {"V", "S"}, "VACCINATIONS": {"V"}, "SCREENING": {"S"}, } - allowed_types = mapping.get(category, set()) + allowed_types = mapping.get(requested_category, set()) filter_all_conditions = "ALL" in conditions - active_campaigns = self.get_active_campaigns(campaign_configs) + allowed_campaigns = [c for c in campaign_configs if c.type in allowed_types] + active_campaigns = self.get_active_campaigns(allowed_campaigns) for condition_name, campaign_group in groupby( 
sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - campaigns = list(campaign_group) - if ( - campaigns - and campaigns[0].type in allowed_types - and (filter_all_conditions or str(condition_name) in conditions) - ): - yield condition_name, campaigns + campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] + + yield condition_name, self.get_latest_campaign(campaigns) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index a0b59a53a..1cdcaf737 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -33,12 +33,12 @@ def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 ): campaign = rule.CampaignConfigFactory.build(target=campaign_target, type=campaign_type) - result = campaign_evaluator.get_requested_grouped_campaigns([campaign], conditions_filter, category_filter) + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], conditions_filter, category_filter) assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) def test_campaigns_grouped_by_condition_name_with_no_campaigns(campaign_evaluator): - result = campaign_evaluator.get_requested_grouped_campaigns([], ["RSV"], "VACCINATIONS") + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([], ["RSV"], "VACCINATIONS") assert_that(list(result), is_([])) @@ -47,7 +47,7 @@ def test_campaigns_grouped_by_condition_name_with_no_active_campaigns(campaign_e target="RSV", type="V", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) ) - result = campaign_evaluator.get_requested_grouped_campaigns([campaign], ["RSV"], "VACCINATIONS") + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["RSV"], 
"VACCINATIONS") assert_that(list(result), is_([])) @@ -63,7 +63,7 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( campaign_evaluator, category_filter, campaign_type, expected_count ): campaign = rule.CampaignConfigFactory.build(target="COVID", type=campaign_type) - result = list(campaign_evaluator.get_requested_grouped_campaigns([campaign], ["COVID"], category_filter)) + result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter)) assert_that(len(result), is_(expected_count)) if expected_count > 0: assert_that(str(result[0][0]), is_("COVID")) @@ -71,7 +71,7 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campaign_evaluator): campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") - result = campaign_evaluator.get_requested_grouped_campaigns([campaign], [], "VACCINATIONS") + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") assert_that(list(result), is_([])) @@ -84,7 +84,7 @@ def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_ ) all_campaigns = [campaign1, campaign2, campaign3, inactive_campaign] - result = list(campaign_evaluator.get_requested_grouped_campaigns(all_campaigns, ["COVID", "FLU"], "VACCINATIONS")) + result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target(all_campaigns, ["COVID", "FLU"], "VACCINATIONS")) assert_that(len(result), is_(2)) @@ -105,13 +105,13 @@ def test_campaign_grouping_is_affected_by_order_for_mixed_types(campaign_evaluat evaluator_s_first = campaign_evaluator result_s_first = list( - evaluator_s_first.get_requested_grouped_campaigns([campaign_s, campaign_v], ["RSV"], "VACCINATIONS") + evaluator_s_first.get_campaign_with_latest_active_iteration_per_target([campaign_s, campaign_v], ["RSV"], "VACCINATIONS") ) 
assert_that(result_s_first, is_([])) evaluator_v_first = campaign_evaluator result_v_first = list( - evaluator_v_first.get_requested_grouped_campaigns([campaign_v, campaign_s], ["RSV"], "VACCINATIONS") + evaluator_v_first.get_campaign_with_latest_active_iteration_per_target([campaign_v, campaign_s], ["RSV"], "VACCINATIONS") ) assert_that(len(result_v_first), is_(1)) assert_that(len(result_v_first[0][1]), is_(2)) From d62bca8ff3c3d2f37a83cc96971e544d5c238a7e Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 2 Mar 2026 17:05:31 +0000 Subject: [PATCH 05/66] ELI-615 | wip --- .../calculators/eligibility_calculator.py | 25 +++--------- .../services/processors/campaign_evaluator.py | 39 ++++++++++++------- .../processors/test_campaign_evaluator.py | 26 ++++++++++--- 3 files changed, 52 insertions(+), 38 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index ab37a0718..9f92bc9ad 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -81,7 +81,9 @@ def get_the_best_cohort_memberships( return best_status, best_cohorts - def get_eligibility_status(self, include_actions: str, conditions: list[str], requested_category: str) -> EligibilityStatus: + def get_eligibility_status( + self, include_actions: str, conditions: list[str], requested_category: str + ) -> EligibilityStatus: include_actions_flag = include_actions.upper() == "Y" condition_results: dict[ConditionName, IterationResult] = {} final_result = [] @@ -89,8 +91,8 @@ def get_eligibility_status(self, include_actions: str, conditions: list[str], re requested_grouped_campaigns = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( self.campaign_configs, conditions, requested_category 
) - for condition_name, campaign_group in requested_grouped_campaigns: - best_iteration_result = self.get_best_iteration_result(campaign_group) + for condition_name, campaign in requested_grouped_campaigns: + best_iteration_result = self.get_best_iteration_result(campaign) if best_iteration_result is None: continue @@ -123,23 +125,8 @@ def get_eligibility_status(self, include_actions: str, conditions: list[str], re # Consolidate all the results and return return eligibility_status.EligibilityStatus(conditions=final_result) - def get_best_iteration_result(self, campaign_group: list[CampaignConfig]) -> BestIterationResult | None: - iteration_results = self.get_iteration_results(campaign_group) - if not iteration_results: - return None - - (_best_iteration_name, best_iteration_result) = max( - iteration_results.items(), - key=lambda item: next(iter(item[1].cohort_results.values())).status.value - # Below handles the case where there are no cohort results - if item[1].cohort_results - else -1, - ) - - return best_iteration_result - - def get_iteration_results(self, campaign_group: list[CampaignConfig]) -> dict[IterationName, BestIterationResult]: + def get_iteration_results(self, campaign_group: CampaignConfig) -> dict[IterationName, BestIterationResult]: iteration_results: dict[IterationName, BestIterationResult] = {} for cc in campaign_group: diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 2b4f7140a..ded79dd12 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -15,22 +15,35 @@ class CampaignEvaluator: def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> list[CampaignConfig]: return [cc for cc in campaign_configs if cc.campaign_live] - def get_latest_campaign(self, campaign_group: list[CampaignConfig]): - 
if not campaign_group: - return None + def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig: + + """ + Returns the campaign with the latest active iteration date. - latest_date = max(c.start_date for c in campaign_group) + 1. Collect all campaigns with an active iteration. + 2. Sort by iteration date (descending). + 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. + """ + + if not active_campaigns: + return None - latest = [c for c in campaign_group if c.start_date == latest_date] + valid_items = [ + (cc.current_iteration.iteration_date, cc) + for cc in active_campaigns if cc.current_iteration + ] - if len(latest) == 1: - return latest[0] + if not valid_items: + latest_date, latest_campaign = None, None + else: + max_date = max(item[0] for item in valid_items) + cc_with_max_iteration_date = [item for item in valid_items if item[0] == max_date] + if len(cc_with_max_iteration_date) > 1: + raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} campaigns found for date {max_date}") - if len(latest) > 1: - raise ValueError( - f"Multiple campaigns share the latest start_date: {latest_date}") # TODO handle it in FHIR format + latest_date, latest_campaign = cc_with_max_iteration_date[0] - return None + return latest_campaign def get_campaign_with_latest_active_iteration_per_target( self, campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str @@ -52,6 +65,6 @@ def get_campaign_with_latest_active_iteration_per_target( sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] + filtered_campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] - yield condition_name, self.get_latest_campaign(campaigns) + yield condition_name, 
self.get_campaign_with_latest_iteration(filtered_campaigns) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index 1cdcaf737..5bdaa27cd 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -33,7 +33,9 @@ def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 ): campaign = rule.CampaignConfigFactory.build(target=campaign_target, type=campaign_type) - result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], conditions_filter, category_filter) + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + [campaign], conditions_filter, category_filter + ) assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) @@ -47,7 +49,9 @@ def test_campaigns_grouped_by_condition_name_with_no_active_campaigns(campaign_e target="RSV", type="V", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) ) - result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["RSV"], "VACCINATIONS") + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + [campaign], ["RSV"], "VACCINATIONS" + ) assert_that(list(result), is_([])) @@ -63,7 +67,9 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( campaign_evaluator, category_filter, campaign_type, expected_count ): campaign = rule.CampaignConfigFactory.build(target="COVID", type=campaign_type) - result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter)) + result = list( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter) + ) assert_that(len(result), is_(expected_count)) if expected_count > 0: assert_that(str(result[0][0]), is_("COVID")) @@ 
-84,7 +90,11 @@ def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_ ) all_campaigns = [campaign1, campaign2, campaign3, inactive_campaign] - result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target(all_campaigns, ["COVID", "FLU"], "VACCINATIONS")) + result = list( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + all_campaigns, ["COVID", "FLU"], "VACCINATIONS" + ) + ) assert_that(len(result), is_(2)) @@ -105,13 +115,17 @@ def test_campaign_grouping_is_affected_by_order_for_mixed_types(campaign_evaluat evaluator_s_first = campaign_evaluator result_s_first = list( - evaluator_s_first.get_campaign_with_latest_active_iteration_per_target([campaign_s, campaign_v], ["RSV"], "VACCINATIONS") + evaluator_s_first.get_campaign_with_latest_active_iteration_per_target( + [campaign_s, campaign_v], ["RSV"], "VACCINATIONS" + ) ) assert_that(result_s_first, is_([])) evaluator_v_first = campaign_evaluator result_v_first = list( - evaluator_v_first.get_campaign_with_latest_active_iteration_per_target([campaign_v, campaign_s], ["RSV"], "VACCINATIONS") + evaluator_v_first.get_campaign_with_latest_active_iteration_per_target( + [campaign_v, campaign_s], ["RSV"], "VACCINATIONS" + ) ) assert_that(len(result_v_first), is_(1)) assert_that(len(result_v_first[0][1]), is_(2)) From 7a68a628c9f9a8444e83e1896066dbbfae30ff20 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 09:38:02 +0000 Subject: [PATCH 06/66] ELI-615 | wip --- .../services/processors/campaign_evaluator.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index ded79dd12..2e1e435ad 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ 
b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -15,7 +15,7 @@ class CampaignEvaluator: def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> list[CampaignConfig]: return [cc for cc in campaign_configs if cc.campaign_live] - def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig: + def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig | None: """ Returns the campaign with the latest active iteration date. @@ -34,14 +34,15 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf ] if not valid_items: - latest_date, latest_campaign = None, None + latest_campaign = None else: max_date = max(item[0] for item in valid_items) - cc_with_max_iteration_date = [item for item in valid_items if item[0] == max_date] + cc_with_max_iteration_date:list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] if len(cc_with_max_iteration_date) > 1: - raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} campaigns found for date {max_date}") + raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations for target {cc_with_max_iteration_date[0].iteration_date}" + f"found for date {max_date}") - latest_date, latest_campaign = cc_with_max_iteration_date[0] + latest_campaign = cc_with_max_iteration_date[0] return latest_campaign From f85348bbf317a276104eb3f0bd2df4940a77c1ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 26 Feb 2026 16:49:21 +0000 Subject: [PATCH 07/66] Bump werkzeug from 3.1.5 to 3.1.6 Bumps [werkzeug](https://github.com/pallets/werkzeug) from 3.1.5 to 3.1.6. 
- [Release notes](https://github.com/pallets/werkzeug/releases) - [Changelog](https://github.com/pallets/werkzeug/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/werkzeug/compare/3.1.5...3.1.6) --- updated-dependencies: - dependency-name: werkzeug dependency-version: 3.1.6 dependency-type: indirect ... Signed-off-by: dependabot[bot] --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1f2455bca..c59d3c799 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3184,14 +3184,14 @@ files = [ [[package]] name = "werkzeug" -version = "3.1.5" +version = "3.1.6" description = "The comprehensive WSGI web application library." optional = false python-versions = ">=3.9" groups = ["main", "dev"] files = [ - {file = "werkzeug-3.1.5-py3-none-any.whl", hash = "sha256:5111e36e91086ece91f93268bb39b4a35c1e6f1feac762c9c822ded0a4e322dc"}, - {file = "werkzeug-3.1.5.tar.gz", hash = "sha256:6a548b0e88955dd07ccb25539d7d0cc97417ee9e179677d22c7041c8f078ce67"}, + {file = "werkzeug-3.1.6-py3-none-any.whl", hash = "sha256:7ddf3357bb9564e407607f988f683d72038551200c704012bb9a4c523d42f131"}, + {file = "werkzeug-3.1.6.tar.gz", hash = "sha256:210c6bede5a420a913956b4791a7f4d6843a43b6fcee4dfa08a65e93007d0d25"}, ] [package.dependencies] From a67ab1a1bee3a4fd8b5555ccaf644e6770d1489c Mon Sep 17 00:00:00 2001 From: oneeb-nhs <258801025+oneeb-nhs@users.noreply.github.com> Date: Mon, 2 Mar 2026 13:59:08 +0000 Subject: [PATCH 08/66] Updated not_member_of operator to NotMemberOf (#594) --- src/eligibility_signposting_api/model/campaign_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 73199dc07..c2bdd6a8f 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -73,7 +73,7 @@ class RuleOperator(StrEnum): is_in = 
"in" not_in = "not_in" member_of = "MemberOf" - not_member_of = "NotaMemberOf" + not_member_of = "NotMemberOf" is_null = "is_null" is_not_null = "is_not_null" is_between = "between" From b8e4e34c9a4ff48aaa1ad5599cb38f0301b8c029 Mon Sep 17 00:00:00 2001 From: Robert Bailiff Date: Tue, 3 Mar 2026 10:26:20 +0000 Subject: [PATCH 09/66] Added vulture to workflows (#585) * Added vulture to workflows * Added new make commands and added to project * Added updated lockfile * Minimal config with no errors * Corrected vulture commands * Generating new lock file --- .github/actions/check-dead-code/action.yaml | 19 +++++++++++++++++++ .github/workflows/stage-1-commit.yaml | 10 ++++++++++ Makefile | 5 ++++- poetry.lock | 16 ++++++++++++++-- pyproject.toml | 7 +++++++ 5 files changed, 54 insertions(+), 3 deletions(-) create mode 100644 .github/actions/check-dead-code/action.yaml diff --git a/.github/actions/check-dead-code/action.yaml b/.github/actions/check-dead-code/action.yaml new file mode 100644 index 000000000..0e595052b --- /dev/null +++ b/.github/actions/check-dead-code/action.yaml @@ -0,0 +1,19 @@ +name: "Check Dead Code" +description: "Runs Vulture to detect unused Python code." 
+ +runs: + using: "composite" + steps: + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: "3.13" + + - name: Install dependencies + shell: bash + run: make dependencies install-python + + - name: Run Vulture + shell: bash + run: poetry run vulture + diff --git a/.github/workflows/stage-1-commit.yaml b/.github/workflows/stage-1-commit.yaml index 36a15737d..86c53ed5c 100644 --- a/.github/workflows/stage-1-commit.yaml +++ b/.github/workflows/stage-1-commit.yaml @@ -157,3 +157,13 @@ jobs: uses: actions/checkout@v6 - name: "Run OWASP Dependency Scan" uses: ./.github/actions/owasp-dependency-scan + check-dead-code: + name: "Check for dead code" + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: "Checkout code" + uses: actions/checkout@v6 + - name: "Check for dead code" + uses: ./.github/actions/check-dead-code + diff --git a/Makefile b/Makefile index 4cd4d9fc1..342b5c14b 100644 --- a/Makefile +++ b/Makefile @@ -28,6 +28,9 @@ format: ## Format and fix code format_lint: format lint +vulture: + poetry run vulture + #Files to loop over in release _dist_include="pytest.ini poetry.lock poetry.toml pyproject.toml Makefile build/. tests" @@ -52,7 +55,7 @@ config:: # Configure development environment (main) @Configuration # TODO: Use only 'make' targets that are specific to this project, e.g. you may not need to install Node.js make _install-dependencies -precommit: test-unit build test-integration lint ## Pre-commit tasks +precommit: test-unit build test-integration lint vulture ## Pre-commit tasks python -m this # ============================================================================== diff --git a/poetry.lock b/poetry.lock index c59d3c799..bace7d18e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -3170,6 +3170,18 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] +[[package]] +name = "vulture" +version = "2.14" +description = "Find dead code" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "vulture-2.14-py2.py3-none-any.whl", hash = "sha256:d9a90dba89607489548a49d557f8bac8112bd25d3cbc8aeef23e860811bd5ed9"}, + {file = "vulture-2.14.tar.gz", hash = "sha256:cb8277902a1138deeab796ec5bef7076a6e0248ca3607a3f3dee0b6d9e9b8415"}, +] + [[package]] name = "wcwidth" version = "0.2.13" @@ -3456,4 +3468,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "c5064b43e402173391286c84cff772c1776fdf816a8fbd229cfdafa26da4b456" +content-hash = "4456e8d9141a4581c9fc2a1bda3c779fe194359c2d5a1588fe180563afb9b2b6" diff --git a/pyproject.toml b/pyproject.toml index 4fbaa3f8e..086a0ebce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,6 +63,8 @@ python-dotenv = "^1.2.1" openapi-spec-validator = "^0.7.2" pip-licenses = "^5.5.0" cachetools = "^7.0.1" +vulture = "^2.14" + [tool.poetry-plugin-lambda-build] docker-image = "public.ecr.aws/sam/build-python3.13:1.139-x86_64" # See https://gallery.ecr.aws/search?searchTerm=%22python%22&architecture=x86-64&popularRegistries=amazon&verified=verified&operatingSystems=Linux @@ -114,3 +116,8 @@ exclude_lines = [ "if TYPE_CHECKING:", "raise NotImplementedError", ] + +[tool.vulture] +min_confidence = 80 +paths = ["src/", "tests/"] +ignore_names = ["secretsmanager_client", "consumer_*", "rule_processor_instance"] From 15895204d4915184857b09c4c61f7e58d005c08d Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 11:47:32 +0000 Subject: [PATCH 10/66] ELI-615 | modified iterations_result to iteration result --- .../calculators/eligibility_calculator.py | 47 ++++++++----------- 
.../services/processors/campaign_evaluator.py | 22 +++++---- 2 files changed, 34 insertions(+), 35 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 9f92bc9ad..01a75229e 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -88,15 +88,15 @@ def get_eligibility_status( condition_results: dict[ConditionName, IterationResult] = {} final_result = [] - requested_grouped_campaigns = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + requested_cc_with_active_iteration = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( self.campaign_configs, conditions, requested_category ) - for condition_name, campaign in requested_grouped_campaigns: - best_iteration_result = self.get_best_iteration_result(campaign) - - if best_iteration_result is None: + for condition_name, campaign in requested_cc_with_active_iteration: + if campaign is None: continue + best_iteration_result = self.get_iteration_result(campaign) + matched_action_detail = self.action_rule_handler.get_actions( self.person, best_iteration_result.active_iteration, @@ -126,31 +126,24 @@ def get_eligibility_status( return eligibility_status.EligibilityStatus(conditions=final_result) - def get_iteration_results(self, campaign_group: CampaignConfig) -> dict[IterationName, BestIterationResult]: - iteration_results: dict[IterationName, BestIterationResult] = {} + def get_iteration_result(self, campaign_with_active_iteration: CampaignConfig) -> BestIterationResult: - for cc in campaign_group: - try: - active_iteration = cc.current_iteration - except StopIteration: - logger.info("Skipping campaign ID %s as no active iteration was found.", cc.id) - continue - cohort_results: dict[CohortLabel, CohortGroupResult] 
= self.rule_processor.get_cohort_group_results( - self.person, active_iteration - ) + active_iteration = campaign_with_active_iteration.active_iteration + cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( + self.person, active_iteration + ) - # Determine Result between cohorts - get the best - status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) - status_text = self.get_status_text(active_iteration.status_text, ConditionName(cc.target), status) + # Determine Result between cohorts - get the best + status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) + status_text = self.get_status_text(active_iteration.status_text, ConditionName(campaign_with_active_iteration.target), status) - iteration_results[active_iteration.name] = BestIterationResult( - IterationResult(status, status_text, best_cohorts, []), - active_iteration, - cc.id, - cc.version, - cohort_results, - ) - return iteration_results + return BestIterationResult( + IterationResult(status, status_text, best_cohorts, []), + active_iteration, + campaign_with_active_iteration.id, + campaign_with_active_iteration.version, + cohort_results, + ) @staticmethod def get_status_text( diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 2e1e435ad..613923481 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -1,3 +1,4 @@ +import logging from collections.abc import Collection, Iterator from itertools import groupby from operator import attrgetter @@ -7,6 +8,7 @@ from eligibility_signposting_api.model import eligibility_status from eligibility_signposting_api.model.campaign_config import CampaignConfig +logger = logging.getLogger(__name__) @service class CampaignEvaluator: @@ -25,13 +27,16 @@ def 
get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. """ - if not active_campaigns: - return None + valid_items = [] - valid_items = [ - (cc.current_iteration.iteration_date, cc) - for cc in active_campaigns if cc.current_iteration - ] + for cc in active_campaigns: + if cc.current_iteration: + valid_items.append((cc.current_iteration.iteration_date, cc)) + else: + logger.info( + "Skipping campaign ID %s as no active iteration was found.", + cc.id, + ) if not valid_items: latest_campaign = None @@ -39,14 +44,15 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf max_date = max(item[0] for item in valid_items) cc_with_max_iteration_date:list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] if len(cc_with_max_iteration_date) > 1: - raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations for target {cc_with_max_iteration_date[0].iteration_date}" + raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " + f"for target {cc_with_max_iteration_date[0].iteration_date}" f"found for date {max_date}") latest_campaign = cc_with_max_iteration_date[0] return latest_campaign - def get_campaign_with_latest_active_iteration_per_target( + def get_campaign_with_latest_active_iteration_per_target( self, campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig]]: mapping = { From e97651d29937e24787137e20536cf1b02222cc78 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 12:19:35 +0000 Subject: [PATCH 11/66] ELI-615 | fix - naming issues | handle stop iter exception --- .../services/calculators/eligibility_calculator.py | 2 +- .../services/processors/campaign_evaluator.py | 4 ++-- 2 files changed, 3 
insertions(+), 3 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 01a75229e..aea1afe84 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -128,7 +128,7 @@ def get_eligibility_status( def get_iteration_result(self, campaign_with_active_iteration: CampaignConfig) -> BestIterationResult: - active_iteration = campaign_with_active_iteration.active_iteration + active_iteration = campaign_with_active_iteration.current_iteration cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( self.person, active_iteration ) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 613923481..d2534a804 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -30,9 +30,9 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf valid_items = [] for cc in active_campaigns: - if cc.current_iteration: + try: valid_items.append((cc.current_iteration.iteration_date, cc)) - else: + except StopIteration: logger.info( "Skipping campaign ID %s as no active iteration was found.", cc.id, From 5aecbef366afbeed56ad1dcc733034b56ad52f71 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 14:46:02 +0000 Subject: [PATCH 12/66] ELI-615 | campaign_configs - fixture updated | test case fixed --- .../services/processors/campaign_evaluator.py | 2 +- tests/integration/conftest.py | 13 ++++++----- .../in_process/test_eligibility_endpoint.py | 22 ++++++++++++------- 3 files changed, 23 
insertions(+), 14 deletions(-) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index d2534a804..7c5968a7d 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -45,7 +45,7 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf cc_with_max_iteration_date:list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] if len(cc_with_max_iteration_date) > 1: raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " - f"for target {cc_with_max_iteration_date[0].iteration_date}" + f"for target {cc_with_max_iteration_date[0].current_iteration.iteration_date}" f"found for date {max_date}") latest_campaign = cc_with_max_iteration_date[0] diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 4293be080..be2ac01cf 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1188,12 +1188,14 @@ def campaign_configs(request, s3_client: BaseClient, rules_bucket: BucketName) - targets = [] campaign_id = [] - status = [] + iteration_status = [] + iteration_date = [] for t, _id, *rest in raw: targets.append(t) campaign_id.append(_id) - status.append(rest[0] if rest else None) + iteration_status.append(rest[0] if rest else None) + iteration_date.append(rest[1] if rest else None) for i in range(len(targets)): campaign: CampaignConfig = rule.CampaignConfigFactory.build( @@ -1221,8 +1223,9 @@ def campaign_configs(request, s3_client: BaseClient, rules_bucket: BucketName) - ], ) - if status[i] == "inactive": - campaign.iterations[0].iteration_date = datetime.datetime.now(tz=datetime.UTC) + datetime.timedelta(days=7) + # Update iteration date + if iteration_status[i]: + campaign.iterations[0].iteration_date = iteration_date[i] campaign_data = 
{"CampaignConfig": campaign.model_dump(by_alias=True)} key = f"{campaign.name}.json" @@ -1304,7 +1307,7 @@ def consumer_to_active_rsv_campaign_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def consumer_to_active_campaign_having_and_rule_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 2ff477bb8..466f0e575 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -30,6 +30,12 @@ def today(): return datetime.now(UTC).date() +def yesterday(): + return datetime.now(UTC).date()- timedelta(days=1) + +def tomorrow(): + return datetime.now(UTC).date()+ timedelta(days=1) + class TestBaseLine: def test_nhs_number_given( @@ -1196,14 +1202,14 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no [ ( [ - # Campaign configs in S3 - # Note: Configs are uploaded in order so the start date would be newer down the order. 
- ("RSV", "RSV_campaign_id_1"), - ("RSV", "RSV_campaign_id_2"), - ("RSV", "RSV_campaign_id_4"), - ("RSV", "RSV_campaign_id_3"), - ("RSV", "inactive_RSV_campaign_id_5", "inactive"), # inactive iteration - ("RSV", "RSV_campaign_id_6"), + # Creates campaign configs by [target, campaign id, iteration status, iteration date] + ("RSV", "RSV_campaign_id_1", "active", today()), + ("RSV", "RSV_campaign_id_2", "active",today()), + ("RSV", "RSV_campaign_id_3", "active", today()), + ("RSV", "RSV_campaign_id_4", "active", yesterday()), + # inactive iteration + ("RSV", "inactive_RSV_campaign_id_5", "inactive", tomorrow()), + ("RSV", "RSV_campaign_id_6", "active", today()), ], { # Consumer mappings in S3 From 29e566f07a087c306d0d5e2cce2d362855f6bfb6 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:11:33 +0000 Subject: [PATCH 13/66] ELI-615 | fix flaky tests do to fixture scope --- tests/integration/conftest.py | 2 +- tests/integration/in_process/test_eligibility_endpoint.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index be2ac01cf..8bdafd4e3 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -738,7 +738,7 @@ def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture(scope="class") +@pytest.fixture(scope="function") def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 466f0e575..b5ba1868a 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1421,7 
+1421,7 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa ), ], ) - def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( # noqa : PLR0913 + def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaign_with_diff_iteration_date( # noqa : PLR0913 self, client: FlaskClient, persisted_person_pc_sw19: NHSNumber, From 5e2cf1c9bcb1420b04c9b5491971907819e1765b Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:39:14 +0000 Subject: [PATCH 14/66] ELI-615 | fix flaky tests - removed best status test --- tests/integration/conftest.py | 26 ++-- .../in_process/test_eligibility_endpoint.py | 121 ------------------ 2 files changed, 13 insertions(+), 134 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 8bdafd4e3..41098728a 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -586,7 +586,7 @@ def firehose_delivery_stream(firehose_client: BaseClient, audit_bucket: BucketNa return firehose_client.describe_delivery_stream(DeliveryStreamName=stream_name) -@pytest.fixture(scope="class") +@pytest.fixture def rsv_campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -696,7 +696,7 @@ def campaign_config_with_rules_having_rule_mapper( s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: campaigns, campaign_data_keys = [], [] @@ -738,7 +738,7 @@ def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture(scope="function") +@pytest.fixture def 
campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -776,7 +776,7 @@ def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketNam s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_tokens(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -843,7 +843,7 @@ def campaign_config_with_tokens(s3_client: BaseClient, rules_bucket: BucketName) s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_invalid_tokens(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -1057,7 +1057,7 @@ def campaign_config_with_custom_target_attributes( s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: """Create and upload multiple campaign configs to S3, then clean up after tests.""" campaigns, campaign_data_keys = [], [] @@ -1121,7 +1121,7 @@ def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_virtual_cohort(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="COVID", @@ -1144,7 +1144,7 @@ def campaign_config_with_virtual_cohort(s3_client: BaseClient, rules_bucket: Buc s3_client.delete_object(Bucket=rules_bucket, 
Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_missing_descriptions_missing_rule_text( s3_client: BaseClient, rules_bucket: BucketName ) -> Generator[CampaignConfig]: @@ -1265,7 +1265,7 @@ def create_and_put_consumer_mapping_in_s3( return consumer_mapping -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_active_campaign_having_invalid_tokens_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1279,7 +1279,7 @@ def consumer_to_active_campaign_having_invalid_tokens_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_active_campaign_having_tokens_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1293,7 +1293,7 @@ def consumer_to_active_campaign_having_tokens_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_active_rsv_campaign_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1307,7 +1307,7 @@ def consumer_to_active_rsv_campaign_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="function") +@pytest.fixture def consumer_to_active_campaign_having_and_rule_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1456,7 +1456,7 @@ def consumer_to_campaign_having_inactive_iteration_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_multiple_campaign_configs_mapping( multiple_campaign_configs: list[CampaignConfig], consumer_id: ConsumerId, diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index b5ba1868a..3c23c5a69 100644 --- 
a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1270,127 +1270,6 @@ def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_e else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) - def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target( # noqa : PLR0913 - self, - client: FlaskClient, - persisted_person_pc_sw19: NHSNumber, - s3_client: BaseClient, - consumer_mapping_bucket: BucketName, - rules_bucket: BucketName, - secretsmanager_client: BaseClient, # noqa: ARG002 - ): - # Given - consumer_id = "consumer-n3bs-jo4hn-ce4na" - headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} - - # Consumer Mapping Data - s3_client.put_object( - Bucket=consumer_mapping_bucket, - Key="consumer_mapping_config.json", - Body=json.dumps( - { - consumer_id: [ - {"CampaignConfigID": "RSV_campaign_id_not_actionable"}, - {"CampaignConfigID": "RSV_campaign_id_actionable"}, - ], - } - ), - ContentType="application/json", - ) - - # Campaign configs - campaign_1 = rule.CampaignConfigFactory.build( - id="RSV_campaign_id_not_actionable", - target="RSV", - type="V", - iterations=[ - rule.IterationFactory.build( - iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(name="Exclude SW19", description=""), - ], - iteration_cohorts=[ - rule.IterationCohortFactory.build( - cohort_label="cohort1", - cohort_group="cohort_group1", - positive_description="positive_description", - ) - ], - status_text=None, - ) - ], - ) - - campaign_2 = rule.CampaignConfigFactory.build( - id="RSV_campaign_id_actionable", - target="RSV", - type="V", - iterations=[ - rule.IterationFactory.build( - iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(name="Exclude M4", comparator=RuleComparator("M4")), - ], - iteration_cohorts=[ - rule.IterationCohortFactory.build( - cohort_label="cohort1", - 
cohort_group="cohort_group1", - positive_description="positive_description", - ) - ], - status_text=None, - ) - ], - ) - - for campaign in [campaign_1, campaign_2]: - s3_client.put_object( - Bucket=rules_bucket, - Key=f"{campaign.id}.json", - Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), - ContentType="application/json", - ) - - # When - response = client.get(f"/patient-check/{persisted_person_pc_sw19}?includeActions=Y", headers=headers) - - # Then - assert_that( - response, - is_response() - .with_status_code(HTTPStatus.OK) - .and_text( - is_json_that( - has_entry( - "processedSuggestions", - equal_to( - [ - { - "condition": "RSV", - "status": "Actionable", - "eligibilityCohorts": [ - { - "cohortCode": "cohort_group1", - "cohortStatus": "Actionable", - "cohortText": "positive_description", - } - ], - "actions": [ - { - "actionCode": "action_code", - "actionType": "defaultcomms", - "description": "", - "urlLabel": "", - "urlLink": "", - } - ], - "suitabilityRules": [], - "statusText": "You should have the RSV vaccine", - } - ] - ), - ) - ) - ), - ) @pytest.mark.parametrize( ("campaign_1_start_date", "campaign_2_start_date", "postcode_for_comparator", "expected_campaign_id"), From 4679ca1c604bf6a3b3442d68ef2ca0de1f1fd590 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:56:56 +0000 Subject: [PATCH 15/66] ELI-615 | used raw campagin config for tests using iteration dates --- tests/integration/in_process/test_eligibility_endpoint.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 3c23c5a69..490f45950 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1334,13 +1334,14 @@ def 
test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig ) # Campaign configs - campaign_1 = rule.CampaignConfigFactory.build( + campaign_1 = rule.RawCampaignConfigFactory.build( id=campaign_1_start_date[0], target="RSV", start_date=campaign_1_start_date[1], type="V", iterations=[ rule.IterationFactory.build( + iteration_date=campaign_1_start_date[1], iteration_rules=[ rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) @@ -1358,13 +1359,14 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig ], ) - campaign_2 = rule.CampaignConfigFactory.build( + campaign_2 = rule.RawCampaignConfigFactory.build( id=campaign_2_start_date[0], target="RSV", type="V", start_date=campaign_2_start_date[1], iterations=[ rule.IterationFactory.build( + iteration_date=campaign_2_start_date[1], iteration_rules=[ rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) From a1ec3f6616777f4e897c8f4cd4c403130dd56ac8 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 16:40:52 +0000 Subject: [PATCH 16/66] ELI-615 | fix - campaign group is used correctly --- .../services/processors/campaign_evaluator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 7c5968a7d..915b7056e 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -72,6 +72,6 @@ def get_campaign_with_latest_active_iteration_per_target( sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - filtered_campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in 
conditions] + filtered_campaigns = [c for c in campaign_group if filter_all_conditions or str(condition_name) in conditions] yield condition_name, self.get_campaign_with_latest_iteration(filtered_campaigns) From ae0d2c7651eac74df036257b4165fbcb32377637 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 16:59:43 +0000 Subject: [PATCH 17/66] ELI-615 | fix test_campaigns_grouped_by_condition_name_filters_correctly --- .../processors/test_campaign_evaluator.py | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index 5bdaa27cd..8c56e21a1 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -16,16 +16,16 @@ def campaign_evaluator(): @pytest.mark.parametrize( ("campaign_target", "campaign_type", "conditions_filter", "category_filter", "expected_result"), [ - ("RSV", "V", ["RSV"], "VACCINATIONS", [("RSV", "V")]), - ("RSV", "V", ["COVID"], "VACCINATIONS", []), - ("RSV", "S", ["RSV"], "ALL", [("RSV", "S")]), - ("RSV", "S", ["ALL"], "ALL", [("RSV", "S")]), - ("RSV", "S", ["RSV"], "VACCINATIONS", []), - ("RSV", "V", ["RSV"], "ALL", [("RSV", "V")]), - ("FLU", "V", ["COVID", "RSV"], "ALL", []), - ("FLU", "S", ["ALL"], "ALL", [("FLU", "S")]), - ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", []), - ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", [("FLU", "V")]), + ("RSV", "V", ["RSV"], "VACCINATIONS", ("RSV", "V")), + ("RSV", "V", ["COVID"], "VACCINATIONS", None), + ("RSV", "S", ["RSV"], "ALL", ("RSV", "S")), + ("RSV", "S", ["ALL"], "ALL", ("RSV", "S")), + ("RSV", "S", ["RSV"], "VACCINATIONS", None ), + ("RSV", "V", ["RSV"], "ALL", ("RSV", "V")), + ("FLU", "V", ["COVID", "RSV"], "ALL", None), + ("FLU", "S", ["ALL"], "ALL", ("FLU", "S")), + ("COVID", "V", ["UNKNOWN"], 
"VACCINATIONS", None), + ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", ("FLU", "V")), ], ) def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 @@ -36,7 +36,12 @@ def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( [campaign], conditions_filter, category_filter ) - assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) + + actual = next( + ((str(name), camp.type) for name, camp in result if camp is not None), + None + ) + assert actual == expected_result def test_campaigns_grouped_by_condition_name_with_no_campaigns(campaign_evaluator): From 39ad10773b5a4e422e2c58cfff5e71cba573c79c Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:36:30 +0000 Subject: [PATCH 18/66] ELI-615 | fix tests --- .../processors/test_campaign_evaluator.py | 40 +++++++++++-------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index 8c56e21a1..066ed2bb6 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -83,12 +83,20 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campaign_evaluator): campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") - assert_that(list(result), is_([])) + + actual = [(name, camp) for name, camp in result][0] + assert_that(actual, is_(("RSV", None))) def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_target(campaign_evaluator): - campaign1 = 
rule.CampaignConfigFactory.build(target="COVID", type="V", id="C1") - campaign2 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C2") + + # providing the start_date here, because CampaignConfigFactory used it for iteration_date + campaign1 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C1", start_date = datetime.datetime.now( + datetime.UTC).date() - datetime.timedelta(days=1), iterations=[ + rule.IterationFactory.build()]) + campaign2 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C2", start_date = datetime.datetime.now( + datetime.UTC).date(), iterations=[ + rule.IterationFactory.build()]) campaign3 = rule.CampaignConfigFactory.build(target="FLU", type="V", id="F1") inactive_campaign = rule.CampaignConfigFactory.build( target="COVID", type="V", id="C3", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) @@ -103,34 +111,34 @@ def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_ assert_that(len(result), is_(2)) - result_dict = {str(name): campaigns for name, campaigns in result} + result_dict = {str(name): campaign for name, campaign in result} + assert_that("COVID" in result_dict) assert_that("FLU" in result_dict) - assert_that(len(result_dict["COVID"]), is_(2)) - assert_that({c.id for c in result_dict["COVID"]}, is_({CampaignID("C1"), CampaignID("C2")})) + assert_that(result_dict["COVID"].id, is_(CampaignID("C2"))) + assert_that(result_dict["FLU"].id, is_(CampaignID("F1"))) - assert_that(len(result_dict["FLU"]), is_(1)) - assert_that(result_dict["FLU"][0].id, is_(CampaignID("F1"))) - -def test_campaign_grouping_is_affected_by_order_for_mixed_types(campaign_evaluator): +def test_campaign_grouping_is_not_affected_by_order_for_mixed_types(campaign_evaluator): campaign_v = rule.CampaignConfigFactory.build(target="RSV", type="V") campaign_s = rule.CampaignConfigFactory.build(target="RSV", type="S") - evaluator_s_first = campaign_evaluator + # Order: S then V 
result_s_first = list( - evaluator_s_first.get_campaign_with_latest_active_iteration_per_target( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( [campaign_s, campaign_v], ["RSV"], "VACCINATIONS" ) ) - assert_that(result_s_first, is_([])) + # Even if S is first, it is filtered out by 'allowed_types' + assert_that(len(result_s_first), is_(1)) + assert_that(result_s_first[0][1].type, is_("V")) - evaluator_v_first = campaign_evaluator + # Order: V then S result_v_first = list( - evaluator_v_first.get_campaign_with_latest_active_iteration_per_target( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( [campaign_v, campaign_s], ["RSV"], "VACCINATIONS" ) ) assert_that(len(result_v_first), is_(1)) - assert_that(len(result_v_first[0][1]), is_(2)) + assert_that(result_v_first[0][1].type, is_("V")) From da3657bee3f9a5557c08fe945836221b57940073 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 10:18:48 +0000 Subject: [PATCH 19/66] ELI-615 | linting --- .../calculators/eligibility_calculator.py | 13 ++++---- .../services/processors/campaign_evaluator.py | 29 ++++++++++-------- .../in_process/test_eligibility_endpoint.py | 9 +++--- .../processors/test_campaign_evaluator.py | 30 +++++++++++-------- 4 files changed, 46 insertions(+), 35 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index aea1afe84..92b0ced7d 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -32,7 +32,6 @@ from eligibility_signposting_api.model.campaign_config import ( CampaignConfig, CohortLabel, - IterationName, ) from eligibility_signposting_api.model.person import Person @@ -88,8 +87,10 @@ def get_eligibility_status( 
condition_results: dict[ConditionName, IterationResult] = {} final_result = [] - requested_cc_with_active_iteration = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - self.campaign_configs, conditions, requested_category + requested_cc_with_active_iteration = ( + self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + self.campaign_configs, conditions, requested_category + ) ) for condition_name, campaign in requested_cc_with_active_iteration: if campaign is None: @@ -125,9 +126,7 @@ def get_eligibility_status( # Consolidate all the results and return return eligibility_status.EligibilityStatus(conditions=final_result) - def get_iteration_result(self, campaign_with_active_iteration: CampaignConfig) -> BestIterationResult: - active_iteration = campaign_with_active_iteration.current_iteration cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( self.person, active_iteration @@ -135,7 +134,9 @@ def get_iteration_result(self, campaign_with_active_iteration: CampaignConfig) - # Determine Result between cohorts - get the best status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) - status_text = self.get_status_text(active_iteration.status_text, ConditionName(campaign_with_active_iteration.target), status) + status_text = self.get_status_text( + active_iteration.status_text, ConditionName(campaign_with_active_iteration.target), status + ) return BestIterationResult( IterationResult(status, status_text, best_cohorts, []), diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 915b7056e..119c601ef 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -10,6 +10,7 @@ logger = logging.getLogger(__name__) + @service class 
CampaignEvaluator: """Filters and groups campaign configurations.""" @@ -18,13 +19,12 @@ def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> return [cc for cc in campaign_configs if cc.campaign_live] def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig | None: - """ - Returns the campaign with the latest active iteration date. + Returns the campaign with the latest active iteration date. - 1. Collect all campaigns with an active iteration. - 2. Sort by iteration date (descending). - 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. + 1. Collect all campaigns with an active iteration. + 2. Sort by iteration date (descending). + 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. """ valid_items = [] @@ -42,19 +42,22 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf latest_campaign = None else: max_date = max(item[0] for item in valid_items) - cc_with_max_iteration_date:list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] + cc_with_max_iteration_date: list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] if len(cc_with_max_iteration_date) > 1: - raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " - f"for target {cc_with_max_iteration_date[0].current_iteration.iteration_date}" - f"found for date {max_date}") + err_msg = ( + f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " + f"for target {cc_with_max_iteration_date[0].current_iteration.iteration_date}" + f"found for date {max_date}" + ) + raise ValueError(err_msg) latest_campaign = cc_with_max_iteration_date[0] return latest_campaign - def get_campaign_with_latest_active_iteration_per_target( + def get_campaign_with_latest_active_iteration_per_target( self, campaign_configs: Collection[CampaignConfig], conditions: list[str], 
requested_category: str - ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig]]: + ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig | None]]: mapping = { "ALL": {"V", "S"}, "VACCINATIONS": {"V"}, @@ -72,6 +75,8 @@ def get_campaign_with_latest_active_iteration_per_target( sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - filtered_campaigns = [c for c in campaign_group if filter_all_conditions or str(condition_name) in conditions] + filtered_campaigns = [ + c for c in campaign_group if filter_all_conditions or str(condition_name) in conditions + ] yield condition_name, self.get_campaign_with_latest_iteration(filtered_campaigns) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 490f45950..e40847bbe 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -30,11 +30,13 @@ def today(): return datetime.now(UTC).date() + def yesterday(): - return datetime.now(UTC).date()- timedelta(days=1) + return datetime.now(UTC).date() - timedelta(days=1) + def tomorrow(): - return datetime.now(UTC).date()+ timedelta(days=1) + return datetime.now(UTC).date() + timedelta(days=1) class TestBaseLine: @@ -1204,7 +1206,7 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no [ # Creates campaign configs by [target, campaign id, iteration status, iteration date] ("RSV", "RSV_campaign_id_1", "active", today()), - ("RSV", "RSV_campaign_id_2", "active",today()), + ("RSV", "RSV_campaign_id_2", "active", today()), ("RSV", "RSV_campaign_id_3", "active", today()), ("RSV", "RSV_campaign_id_4", "active", yesterday()), # inactive iteration @@ -1270,7 +1272,6 @@ def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_e else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) - @pytest.mark.parametrize( 
("campaign_1_start_date", "campaign_2_start_date", "postcode_for_comparator", "expected_campaign_id"), [ diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index 066ed2bb6..db96d3622 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -20,7 +20,7 @@ def campaign_evaluator(): ("RSV", "V", ["COVID"], "VACCINATIONS", None), ("RSV", "S", ["RSV"], "ALL", ("RSV", "S")), ("RSV", "S", ["ALL"], "ALL", ("RSV", "S")), - ("RSV", "S", ["RSV"], "VACCINATIONS", None ), + ("RSV", "S", ["RSV"], "VACCINATIONS", None), ("RSV", "V", ["RSV"], "ALL", ("RSV", "V")), ("FLU", "V", ["COVID", "RSV"], "ALL", None), ("FLU", "S", ["ALL"], "ALL", ("FLU", "S")), @@ -37,10 +37,7 @@ def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 [campaign], conditions_filter, category_filter ) - actual = next( - ((str(name), camp.type) for name, camp in result if camp is not None), - None - ) + actual = next(((str(name), camp.type) for name, camp in result if camp is not None), None) assert actual == expected_result @@ -84,19 +81,26 @@ def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campai campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") - actual = [(name, camp) for name, camp in result][0] + actual = next((name, camp) for name, camp in result) assert_that(actual, is_(("RSV", None))) def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_target(campaign_evaluator): - # providing the start_date here, because CampaignConfigFactory used it for iteration_date - campaign1 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C1", start_date = datetime.datetime.now( - datetime.UTC).date() - datetime.timedelta(days=1), iterations=[ - 
rule.IterationFactory.build()]) - campaign2 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C2", start_date = datetime.datetime.now( - datetime.UTC).date(), iterations=[ - rule.IterationFactory.build()]) + campaign1 = rule.CampaignConfigFactory.build( + target="COVID", + type="V", + id="C1", + start_date=datetime.datetime.now(datetime.UTC).date() - datetime.timedelta(days=1), + iterations=[rule.IterationFactory.build()], + ) + campaign2 = rule.CampaignConfigFactory.build( + target="COVID", + type="V", + id="C2", + start_date=datetime.datetime.now(datetime.UTC).date(), + iterations=[rule.IterationFactory.build()], + ) campaign3 = rule.CampaignConfigFactory.build(target="FLU", type="V", id="F1") inactive_campaign = rule.CampaignConfigFactory.build( target="COVID", type="V", id="C3", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) From 35d9638e91e4eb65481986ceffa77e1963f94095 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 10:56:37 +0000 Subject: [PATCH 20/66] ELI-615 | renamed best_iteration_result to iteration_result_summary --- .../audit/audit_context.py | 14 ++++++------ .../model/eligibility_status.py | 2 +- .../calculators/eligibility_calculator.py | 22 ++++++++++--------- .../processors/action_rule_handler.py | 4 ++-- tests/unit/audit/test_audit_context.py | 14 ++++++------ 5 files changed, 29 insertions(+), 27 deletions(-) diff --git a/src/eligibility_signposting_api/audit/audit_context.py b/src/eligibility_signposting_api/audit/audit_context.py index 53358b4d6..66325669c 100644 --- a/src/eligibility_signposting_api/audit/audit_context.py +++ b/src/eligibility_signposting_api/audit/audit_context.py @@ -19,10 +19,10 @@ ) from eligibility_signposting_api.audit.audit_service import AuditService from eligibility_signposting_api.model.eligibility_status import ( - BestIterationResult, CohortGroupResult, ConditionName, IterationResult, + 
IterationResultSummary, MatchedActionDetail, Reason, RuleType, @@ -63,13 +63,13 @@ def add_request_details(request: Request) -> None: @staticmethod def append_audit_condition( condition_name: ConditionName, - best_iteration_result: BestIterationResult, + iteration_result_summary: IterationResultSummary, action_detail: MatchedActionDetail, ) -> None: audit_eligibility_cohorts, audit_eligibility_cohort_groups, audit_actions = [], [], [] - best_active_iteration = best_iteration_result.active_iteration - best_candidate = best_iteration_result.iteration_result - best_cohort_results = best_iteration_result.cohort_results + best_active_iteration = iteration_result_summary.active_iteration + best_candidate = iteration_result_summary.iteration_result + best_cohort_results = iteration_result_summary.cohort_results filter_audit_rules, suitability_audit_rules = [], [] if best_cohort_results: @@ -94,8 +94,8 @@ def append_audit_condition( audit_actions = AuditContext.create_audit_actions(action_detail.actions) audit_condition = AuditCondition( - campaign_id=best_iteration_result.campaign_id, - campaign_version=best_iteration_result.campaign_version, + campaign_id=iteration_result_summary.campaign_id, + campaign_version=iteration_result_summary.campaign_version, iteration_id=best_active_iteration.id if best_active_iteration else None, iteration_version=best_active_iteration.version if best_active_iteration else None, condition_name=condition_name, diff --git a/src/eligibility_signposting_api/model/eligibility_status.py b/src/eligibility_signposting_api/model/eligibility_status.py index aa19cd678..89b73828b 100644 --- a/src/eligibility_signposting_api/model/eligibility_status.py +++ b/src/eligibility_signposting_api/model/eligibility_status.py @@ -138,7 +138,7 @@ class IterationResult: @dataclass -class BestIterationResult: +class IterationResultSummary: iteration_result: IterationResult active_iteration: Iteration | None = None campaign_id: CampaignID | None = None diff --git 
a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 92b0ced7d..470c8a860 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -11,12 +11,12 @@ from eligibility_signposting_api.audit.audit_context import AuditContext from eligibility_signposting_api.model import campaign_config, eligibility_status from eligibility_signposting_api.model.eligibility_status import ( - BestIterationResult, CohortGroupResult, Condition, ConditionName, EligibilityStatus, IterationResult, + IterationResultSummary, Reason, Status, StatusText, @@ -94,21 +94,21 @@ def get_eligibility_status( ) for condition_name, campaign in requested_cc_with_active_iteration: if campaign is None: - continue + continue # skipping as no active iteration was found. - best_iteration_result = self.get_iteration_result(campaign) + iteration_result_summary = self.evaluate_iteration_result_summary(campaign) matched_action_detail = self.action_rule_handler.get_actions( self.person, - best_iteration_result.active_iteration, - best_iteration_result.iteration_result, + iteration_result_summary.active_iteration, + iteration_result_summary.iteration_result, include_actions_flag=include_actions_flag, ) - best_iteration_result = TokenProcessor.find_and_replace_tokens(self.person, best_iteration_result) + iteration_result_summary = TokenProcessor.find_and_replace_tokens(self.person, iteration_result_summary) matched_action_detail = TokenProcessor.find_and_replace_tokens(self.person, matched_action_detail) - condition_results[condition_name] = best_iteration_result.iteration_result + condition_results[condition_name] = iteration_result_summary.iteration_result condition_results[condition_name].actions = matched_action_detail.actions condition: Condition = self.build_condition( @@ -119,14 
+119,16 @@ def get_eligibility_status( AuditContext.append_audit_condition( condition_name, - best_iteration_result, + iteration_result_summary, matched_action_detail, ) # Consolidate all the results and return return eligibility_status.EligibilityStatus(conditions=final_result) - def get_iteration_result(self, campaign_with_active_iteration: CampaignConfig) -> BestIterationResult: + def evaluate_iteration_result_summary( + self, campaign_with_active_iteration: CampaignConfig + ) -> IterationResultSummary: active_iteration = campaign_with_active_iteration.current_iteration cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( self.person, active_iteration @@ -138,7 +140,7 @@ def get_iteration_result(self, campaign_with_active_iteration: CampaignConfig) - active_iteration.status_text, ConditionName(campaign_with_active_iteration.target), status ) - return BestIterationResult( + return IterationResultSummary( IterationResult(status, status_text, best_cohorts, []), active_iteration, campaign_with_active_iteration.id, diff --git a/src/eligibility_signposting_api/services/processors/action_rule_handler.py b/src/eligibility_signposting_api/services/processors/action_rule_handler.py index b30d4e0cc..44fd4f9ae 100644 --- a/src/eligibility_signposting_api/services/processors/action_rule_handler.py +++ b/src/eligibility_signposting_api/services/processors/action_rule_handler.py @@ -27,14 +27,14 @@ def get_actions( self, person: Person, active_iteration: Iteration | None, - best_iteration_result: IterationResult, + iteration_result: IterationResult, *, include_actions_flag: bool, ) -> MatchedActionDetail: action_detail = MatchedActionDetail() if active_iteration is not None and include_actions_flag: - rule_type = best_iteration_result.status.get_action_rule_type() + rule_type = iteration_result.status.get_action_rule_type() action_detail = self._handle(person, active_iteration, rule_type) return action_detail diff --git 
a/tests/unit/audit/test_audit_context.py b/tests/unit/audit/test_audit_context.py index 4e20f9bdc..637494228 100644 --- a/tests/unit/audit/test_audit_context.py +++ b/tests/unit/audit/test_audit_context.py @@ -15,11 +15,11 @@ ActionCode, ActionDescription, ActionType, - BestIterationResult, CohortGroupResult, ConditionName, InternalActionCode, IterationResult, + IterationResultSummary, MatchedActionDetail, Reason, RuleCode, @@ -147,7 +147,7 @@ def test_append_audit_condition_adds_condition_to_audit_log_on_g_for_actionable_ campaign_config.RuleName("RedirectRuleName1"), campaign_config.RulePriority(1), suggested_actions ) - best_iteration_results = BestIterationResult( + iteration_result_summary = IterationResultSummary( iteration_result, iteration, campaign_details[0], @@ -158,7 +158,7 @@ def test_append_audit_condition_adds_condition_to_audit_log_on_g_for_actionable_ with app.app_context(): g.audit_log = AuditEvent() - AuditContext.append_audit_condition(condition_name, best_iteration_results, matched_action_detail) + AuditContext.append_audit_condition(condition_name, iteration_result_summary, matched_action_detail) expected_audit_action = [ AuditAction( @@ -227,7 +227,7 @@ def test_should_append_audit_suppression_rules_for_actionable_status(app): ) campaign_details = (CampaignID("CampaignID1"), CampaignVersion(123)) - best_iteration_results = BestIterationResult( + iteration_result_summary = IterationResultSummary( iteration_result, iteration, campaign_details[0], @@ -238,7 +238,7 @@ def test_should_append_audit_suppression_rules_for_actionable_status(app): with app.app_context(): g.audit_log = AuditEvent() - AuditContext.append_audit_condition(condition_name, best_iteration_results, MatchedActionDetail()) + AuditContext.append_audit_condition(condition_name, iteration_result_summary, MatchedActionDetail()) assert g.audit_log.response.condition, condition_name cond = g.audit_log.response.condition[0] @@ -288,7 +288,7 @@ def 
test_should_append_audit_filter_rules_for_not_actionable_status(app): ) campaign_details = (CampaignID("CampaignID1"), CampaignVersion(123)) - best_iteration_results = BestIterationResult( + iteration_result_summary = IterationResultSummary( iteration_result, iteration, campaign_details[0], @@ -299,7 +299,7 @@ def test_should_append_audit_filter_rules_for_not_actionable_status(app): with app.app_context(): g.audit_log = AuditEvent() - AuditContext.append_audit_condition(condition_name, best_iteration_results, MatchedActionDetail()) + AuditContext.append_audit_condition(condition_name, iteration_result_summary, MatchedActionDetail()) assert g.audit_log.response.condition, condition_name cond = g.audit_log.response.condition[0] From 5d6d0927affaa344d13f4762e95c6b7139bd4541 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:42:12 +0000 Subject: [PATCH 21/66] ELI-615 | add more test cases - it tests --- .../in_process/test_eligibility_endpoint.py | 46 +++++++++++++++++-- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index e40847bbe..600d2ba7b 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -17,7 +17,13 @@ has_key, ) -from eligibility_signposting_api.model.campaign_config import CampaignConfig, RuleComparator +from eligibility_signposting_api.model.campaign_config import ( + CampaignConfig, + RuleAttributeLevel, + RuleComparator, + RuleOperator, + RuleType, +) from eligibility_signposting_api.model.consumer_mapping import ConsumerId, ConsumerMapping from eligibility_signposting_api.model.eligibility_status import ( NHSNumber, @@ -1273,35 +1279,59 @@ def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_e 
assert_that(len(audit_data["response"]["condition"]), equal_to(0)) @pytest.mark.parametrize( - ("campaign_1_start_date", "campaign_2_start_date", "postcode_for_comparator", "expected_campaign_id"), + ( + "campaign_1_start_date", + "campaign_2_start_date", + "postcode_for_comparator", + "cohort_for_comparator", + "expected_campaign_id", + ), [ ( ("RSV_campaign_id_1", today()), ("RSV_campaign_id_2", today() - timedelta(days=1)), - "SW19", # postcode for resulting in not-actionable + "SW19", # postcode for resulting in not-actionable (used by the suppression rule) + "cohort2", "RSV_campaign_id_1", ), ( ("RSV_campaign_id_1", today() - timedelta(days=1)), ("RSV_campaign_id_2", today()), "SW19", # postcode for resulting in not-actionable + "cohort2", "RSV_campaign_id_2", ), ( ("RSV_campaign_id_1", today()), ("RSV_campaign_id_2", today() - timedelta(days=1)), "M4", # postcode for resulting in actionable + "cohort2", "RSV_campaign_id_1", ), ( ("RSV_campaign_id_1", today() - timedelta(days=1)), ("RSV_campaign_id_2", today()), "M4", # postcode for resulting in actionable + "cohort2", + "RSV_campaign_id_2", + ), + ( + ("RSV_campaign_id_1", today()), + ("RSV_campaign_id_2", today() - timedelta(days=1)), + "M4", # cohort for resulting in not-eligible + "cohort1", + "RSV_campaign_id_1", + ), + ( + ("RSV_campaign_id_1", today() - timedelta(days=1)), + ("RSV_campaign_id_2", today()), + "M4", + "cohort1", # cohort for resulting in not-eligible (used by the filter rule) "RSV_campaign_id_2", ), ], ) - def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaign_with_diff_iteration_date( # noqa : PLR0913 + def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaign_with_diff_iteration_date( # noqa: PLR0913 self, client: FlaskClient, persisted_person_pc_sw19: NHSNumber, @@ -1313,6 +1343,7 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig campaign_1_start_date: tuple[str, date], campaign_2_start_date: 
tuple[str, date], postcode_for_comparator: str, + cohort_for_comparator: str, expected_campaign_id: NHSNumber, ): # Given @@ -1344,6 +1375,13 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig rule.IterationFactory.build( iteration_date=campaign_1_start_date[1], iteration_rules=[ + rule.IterationRuleFactory.build( + type=RuleType.filter, + name="Exclude if cohort matches", + attribute_level=RuleAttributeLevel.COHORT, + comparator=RuleComparator(cohort_for_comparator), + operator=RuleOperator.member_of, + ), rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) ), From 7def478d27b10280de013ad614aa0eb8412a59a1 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:57:39 +0000 Subject: [PATCH 22/66] ELI-615 | test commit - try git leaks ignore --- .gitleaksignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitleaksignore b/.gitleaksignore index cceb449a3..ff9cec0ef 100644 --- a/.gitleaksignore +++ b/.gitleaksignore @@ -1,3 +1,5 @@ # SEE: https://github.com/gitleaks/gitleaks/blob/master/README.md#gitleaksignore cd9c0efec38c5d63053dd865e5d4e207c0760d91:docs/guides/Perform_static_analysis.md:generic-api-key:37 + +bf0c77098978c450d8570b38fb480fbb8d6a0628:.github/instructions/*.instructions.md:stripe-access-token:140 From 750a2c50e88966c7b4a65fbd8c339ac4fa0b4cf3 Mon Sep 17 00:00:00 2001 From: Oneeb <258801025+oneeb-nhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 15:15:00 +0000 Subject: [PATCH 23/66] updated iteration time --- .../model/campaign_config.py | 68 +++++++++++-------- .../test_campaign_config_validator.py | 19 ++++++ 2 files changed, 60 insertions(+), 27 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index c2bdd6a8f..711880ecc 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ 
b/src/eligibility_signposting_api/model/campaign_config.py @@ -4,7 +4,7 @@ import re import typing from collections import Counter -from datetime import UTC, date, datetime +from datetime import UTC, date, datetime, time from enum import StrEnum from functools import cached_property from operator import attrgetter @@ -33,6 +33,7 @@ IterationVersion = NewType("IterationVersion", int) IterationID = NewType("IterationID", str) IterationDate = NewType("IterationDate", date) +IterationTime = NewType("IterationTime", time) RuleName = NewType("RuleName", str) RuleDescription = NewType("RuleDescription", str) RulePriority = NewType("RulePriority", int) @@ -119,19 +120,19 @@ class IterationCohort(BaseModel): @cached_property def is_virtual_cohort(self) -> bool: - return self.virtual == Virtual.YES + return self.virtual == Virtual.YES @field_validator("virtual", mode="before") @classmethod def normalize_virtual(cls, value: str) -> Virtual: if value is None: - return Virtual.NO + return Virtual.NO if isinstance(value, str): value = value.strip().upper() if value == "Y": - return Virtual.YES + return Virtual.YES if value == "N": - return Virtual.NO + return Virtual.NO msg = f"Invalid value for Virtual: {value!r}" raise ValueError(msg) @@ -160,8 +161,8 @@ class IterationRule(BaseModel): @field_validator("rule_stop", mode="before") def parse_yn_to_bool(cls, v: str | bool) -> bool: # noqa: N805, FBT001 if isinstance(v, str): - return v.upper() == "Y" - return v + return v.upper() == "Y" + return v _parent: Iteration | None = PrivateAttr(default=None) @@ -183,7 +184,7 @@ def rule_code(self) -> str: for rule_entry in self._parent.rules_mapper.values(): if rule_entry and self.name in rule_entry.rule_names: rule_code = rule_entry.rule_code - return rule_code or self.code or self.name + return rule_code or self.code or self.name @property def rule_text(self) -> str: @@ -200,7 +201,7 @@ def rule_text(self) -> str: for rule_entry in self._parent.rules_mapper.values(): if rule_entry and 
self.name in rule_entry.rule_names: rule_text = rule_entry.rule_text - return rule_text or self.description + return rule_text or self.description @cached_property def parsed_cohort_labels(self) -> list[str]: @@ -211,11 +212,11 @@ def parsed_cohort_labels(self) -> list[str]: A list of cohort labels, split by comma. If no label is set, returns an empty list. """ if not self.cohort_label: - return [] - return [label.strip() for label in self.cohort_label.split(",") if label.strip()] + return [] + return [label.strip() for label in self.cohort_label.split(",") if label.strip()] def __str__(self) -> str: - return json.dumps(self.model_dump(by_alias=True), indent=2) + return json.dumps(self.model_dump(by_alias=True), indent=2) class AvailableAction(BaseModel): @@ -230,7 +231,7 @@ class AvailableAction(BaseModel): class ActionsMapper(RootModel[dict[str, AvailableAction]]): def get(self, key: str, default: AvailableAction | None = None) -> AvailableAction | None: - return self.root.get(key, default) + return self.root.get(key, default) class StatusText(BaseModel): @@ -251,10 +252,10 @@ class RuleEntry(BaseModel): class RulesMapper(RootModel[dict[str, RuleEntry]]): def get(self, key: str, default: RuleEntry | None = None) -> RuleEntry | None: - return self.root.get(key, default) + return self.root.get(key, default) def values(self) -> list[RuleEntry]: - return list(self.root.values()) + return list(self.root.values()) class Iteration(BaseModel): @@ -262,6 +263,7 @@ class Iteration(BaseModel): version: IterationVersion = Field(..., alias="Version") name: IterationName = Field(..., alias="Name") iteration_date: IterationDate = Field(..., alias="IterationDate") + iteration_time: IterationTime = Field(..., alias="IterationTime") iteration_number: int | None = Field(None, alias="IterationNumber") approval_minimum: int | None = Field(None, alias="ApprovalMinimum") approval_maximum: int | None = Field(None, alias="ApprovalMaximum") @@ -287,7 +289,7 @@ def __init__(self, **data: 
dict[str, typing.Any]) -> None: @classmethod def parse_dates(cls, v: str | date) -> date: if isinstance(v, date): - return v + return v v_str = str(v) @@ -296,7 +298,7 @@ def parse_dates(cls, v: str | date) -> date: raise ValueError(msg) try: - return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 + return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 except ValueError as err: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." raise ValueError(msg) from err @@ -306,10 +308,22 @@ def parse_dates(cls, v: str | date) -> date: def serialize_dates(v: date, _info: SerializationInfo) -> str: return v.strftime("%Y%m%d") + @property + def get_iteration_datetime(self) -> datetime: + iteration_time = ( + self.iteration_time + or getattr(getattr(self, "parent", None), "default_iteration_time", None) + ) + + if iteration_time is None: + raise ValueError("No iteration_time available on object or parent.default_iteration_time.") + + return datetime.combine(self.iteration_date, iteration_time) + + def __str__(self) -> str: return json.dumps(self.model_dump(by_alias=True), indent=2) - class CampaignConfig(BaseModel): id: CampaignID = Field(..., alias="ID") version: CampaignVersion = Field(..., alias="Version") @@ -321,7 +335,7 @@ class CampaignConfig(BaseModel): reviewer: list[str] | None = Field(None, alias="Reviewer") iteration_frequency: Literal["X", "D", "W", "M", "Q", "A"] = Field(..., alias="IterationFrequency") iteration_type: Literal["A", "M", "S", "O"] = Field(..., alias="IterationType") - iteration_time: str | None = Field(None, alias="IterationTime") + default_iteration_time: IterationTime = Field(default=IterationTime(time(0, 0, 0)), alias="IterationTime") default_comms_routing: str | None = Field(None, alias="DefaultCommsRouting") start_date: StartDate = Field(..., alias="StartDate") end_date: EndDate = Field(..., alias="EndDate") @@ -335,7 +349,7 @@ class CampaignConfig(BaseModel): @classmethod def 
parse_dates(cls, v: str | date) -> date: if isinstance(v, date): - return v + return v v_str = str(v) @@ -344,7 +358,7 @@ def parse_dates(cls, v: str | date) -> date: raise ValueError(msg) try: - return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 + return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 except ValueError as err: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." raise ValueError(msg) from err @@ -352,14 +366,14 @@ def parse_dates(cls, v: str | date) -> date: @field_serializer("start_date", "end_date", when_used="always") @staticmethod def serialize_dates(v: date, _info: SerializationInfo) -> str: - return v.strftime("%Y%m%d") + return v.strftime("%Y%m%d") @model_validator(mode="after") def check_start_and_end_dates_sensible(self) -> typing.Self: if self.start_date > self.end_date: message = f"start date {self.start_date} after end date {self.end_date}" raise ValueError(message) - return self + return self @model_validator(mode="after") def check_no_overlapping_iterations(self) -> typing.Self: @@ -368,21 +382,21 @@ def check_no_overlapping_iterations(self) -> typing.Self: iteration_date, count = multiple_found message = f"{count} iterations with iteration date {iteration_date} in campaign {self.id}" raise ValueError(message) - return self + return self @cached_property def campaign_live(self) -> bool: today = datetime.now(tz=UTC).date() - return self.start_date <= today <= self.end_date + return self.start_date <= today <= self.end_date @cached_property def current_iteration(self) -> Iteration: today = datetime.now(tz=UTC).date() iterations_by_date_descending = sorted(self.iterations, key=attrgetter("iteration_date"), reverse=True) - return next(i for i in iterations_by_date_descending if i.iteration_date <= today) + return next(i for i in iterations_by_date_descending if i.iteration_date <= today) def __str__(self) -> str: - return json.dumps(self.model_dump(by_alias=True), indent=2) + 
return json.dumps(self.model_dump(by_alias=True), indent=2) class Rules(BaseModel): diff --git a/tests/unit/validation/test_campaign_config_validator.py b/tests/unit/validation/test_campaign_config_validator.py index 216369a98..0b5c895d7 100644 --- a/tests/unit/validation/test_campaign_config_validator.py +++ b/tests/unit/validation/test_campaign_config_validator.py @@ -318,3 +318,22 @@ def test_approval_minimum_greater_than_approval_maximum_is_invalid( data["ApprovalMinimum"] = approval_min data["ApprovalMaximum"] = approval_max CampaignConfigValidation(**data) + + def test_iteration_time_overrides_default_iteration_time( + self, + valid_iteration_config_with_only_mandatory_fields, + ): + # Arrange + data = valid_iteration_config_with_only_mandatory_fields.copy() + data["default_iteration_time"] = "09:00:00" + data["iteration_time"] = "14:30:00" + config = CampaignConfigValidation(**data) + + # Act + result = config.get_iteration_datetime + + # Assert + assert result.time() == IterationTime(14, 30), ( + "Expected iteration_time to take precedence over default_iteration_time" + ) + From 55bdb2dfbe44c2e1aa045378c9b76b5fe22aca2e Mon Sep 17 00:00:00 2001 From: Karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 15:53:39 +0000 Subject: [PATCH 24/66] Eli 615 : fix - multi campaign target collision (#593) * ELI-615 | campaign having recent - active start_date supersedes the others sharing same best-status * ELI-615 | more linting * ELI-615 | revert commit * ELI-615 | wip * ELI-615 | wip * ELI-615 | wip * Bump werkzeug from 3.1.5 to 3.1.6 Bumps [werkzeug](https://github.com/pallets/werkzeug) from 3.1.5 to 3.1.6. - [Release notes](https://github.com/pallets/werkzeug/releases) - [Changelog](https://github.com/pallets/werkzeug/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/werkzeug/compare/3.1.5...3.1.6) --- updated-dependencies: - dependency-name: werkzeug dependency-version: 3.1.6 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] * Updated not_member_of operator to NotMemberOf (#594) * Added vulture to workflows (#585) * Added vulture to workflows * Added new make commands and added to project * Added updated lockfile * Minimal config with no errors * Corrected vulture commands * Generating new lock file * ELI-615 | modified iterations_result to iteration result * ELI-615 | fix - naming issues | handle stop iter exception * ELI-615 | campaign_configs - fixture updated | test case fixed * ELI-615 | fix flaky tests do to fixture scope * ELI-615 | fix flaky tests - removed best status test * ELI-615 | used raw campagin config for tests using iteration dates * ELI-615 | fix - campaign group is used correctly * ELI-615 | fix test_campaigns_grouped_by_condition_name_filters_correctly * ELI-615 | fix tests * ELI-615 | linting * ELI-615 | renamed best_iteration_result to iteration_result_summary * ELI-615 | add more test cases - it tests * ELI-615 | test commit - try git leaks ignore * ELI-615 | incorporated review comments --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: oneeb-nhs <258801025+oneeb-nhs@users.noreply.github.com> Co-authored-by: Robert Bailiff --- .../calculators/eligibility_calculator.py | 3 - .../services/processors/campaign_evaluator.py | 13 ++- .../in_process/test_eligibility_endpoint.py | 97 +++++++++++++++++++ .../processors/test_campaign_evaluator.py | 7 +- 4 files changed, 109 insertions(+), 11 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 470c8a860..9f83bd916 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -93,9 +93,6 @@ def get_eligibility_status( ) ) for condition_name, campaign 
in requested_cc_with_active_iteration: - if campaign is None: - continue # skipping as no active iteration was found. - iteration_result_summary = self.evaluate_iteration_result_summary(campaign) matched_action_detail = self.action_rule_handler.get_actions( diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 119c601ef..9d1a9aad1 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -45,9 +45,10 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf cc_with_max_iteration_date: list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] if len(cc_with_max_iteration_date) > 1: err_msg = ( - f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " - f"for target {cc_with_max_iteration_date[0].current_iteration.iteration_date}" - f"found for date {max_date}" + f"Ambiguous result: '{len(cc_with_max_iteration_date)}' active iterations " + f"for target {cc_with_max_iteration_date[0].target} " + f"found for date '{max_date}' " + f"across campaign(s) {[cc.id for cc in cc_with_max_iteration_date]}" ) raise ValueError(err_msg) @@ -57,7 +58,7 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf def get_campaign_with_latest_active_iteration_per_target( self, campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str - ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig | None]]: + ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig]]: mapping = { "ALL": {"V", "S"}, "VACCINATIONS": {"V"}, @@ -79,4 +80,6 @@ def get_campaign_with_latest_active_iteration_per_target( c for c in campaign_group if filter_all_conditions or str(condition_name) in conditions ] - yield condition_name, 
self.get_campaign_with_latest_iteration(filtered_campaigns) + campaign = self.get_campaign_with_latest_iteration(filtered_campaigns) + if campaign is not None: + yield (condition_name, campaign) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 600d2ba7b..bc6fe2693 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1445,3 +1445,100 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig assert_that(audit_data["response"]["condition"][0].get("campaignId"), equal_to(expected_campaign_id)) else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) + + def test_if_multiple_active_iterations_with_same_iteration_datetime_for_the_same_target_throws_internal_error( # noqa: PLR0913 + self, + client: FlaskClient, + persisted_person_pc_sw19: NHSNumber, + s3_client: BaseClient, + consumer_mapping_bucket: BucketName, + rules_bucket: BucketName, + secretsmanager_client: BaseClient, # noqa: ARG002 + caplog, + ): + # Given + consumer_id = "consumer-n3bs-jo4hn-ce4na" + headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} + + # Consumer Mapping Data + s3_client.put_object( + Bucket=consumer_mapping_bucket, + Key="consumer_mapping_config.json", + Body=json.dumps( + { + consumer_id: [ + {"CampaignConfigID": "RSV_campaign_id_1"}, + {"CampaignConfigID": "RSV_campaign_id_2"}, + ], + } + ), + ContentType="application/json", + ) + previous_day = yesterday() + # Campaign configs + campaign_1 = rule.RawCampaignConfigFactory.build( + id="RSV_campaign_id_1", + target="RSV", + start_date=previous_day, + type="V", + iterations=[rule.IterationFactory.build(iteration_date=previous_day)], + ) + + campaign_2 = rule.RawCampaignConfigFactory.build( + id="RSV_campaign_id_2", + target="RSV", + start_date=previous_day, + type="V", + 
iterations=[rule.IterationFactory.build(iteration_date=previous_day)], + ) + + for campaign in [campaign_1, campaign_2]: + s3_client.put_object( + Bucket=rules_bucket, + Key=f"{campaign.id}.json", + Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), + ContentType="application/json", + ) + + # When + response = client.get(f"/patient-check/{persisted_person_pc_sw19}", headers=headers) + + assert_that( + response, + is_response() + .with_status_code(HTTPStatus.INTERNAL_SERVER_ERROR) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) + .and_text( + is_json_that( + has_entries( + resourceType="OperationOutcome", + issue=contains_exactly( + has_entries( + severity="error", + code="processing", + diagnostics="An unexpected error occurred.", + details={ + "coding": [ + { + "system": "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", + "code": "INTERNAL_SERVER_ERROR", + "display": "An unexpected internal server error occurred.", + } + ] + }, + ) + ), + ) + ) + ), + ) + + err_msg = ( + "Ambiguous result: '2' active iterations " + "for target RSV " + f"found for date '{previous_day}' " + "across campaign(s) ['RSV_campaign_id_1', 'RSV_campaign_id_2']" + ) + assert any(err_msg in message for message in caplog.messages), ( + f"Expected log message not found. 
Logged messages: {caplog.messages}" + ) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index db96d3622..4a0e1330f 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -79,10 +79,11 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campaign_evaluator): campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") - result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") + result = list( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") + ) - actual = next((name, camp) for name, camp in result) - assert_that(actual, is_(("RSV", None))) + assert_that(result, is_([])) def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_target(campaign_evaluator): From f786f8962cb44821347a88f2828260e863e533a3 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 16:07:26 +0000 Subject: [PATCH 25/66] ELI-674 | fix - vs code alignment anomalies --- .../model/campaign_config.py | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 711880ecc..30cd16054 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -120,19 +120,19 @@ class IterationCohort(BaseModel): @cached_property def is_virtual_cohort(self) -> bool: - return self.virtual == Virtual.YES + return self.virtual == Virtual.YES @field_validator("virtual", mode="before") @classmethod def normalize_virtual(cls, value: str) -> Virtual: 
if value is None: - return Virtual.NO + return Virtual.NO if isinstance(value, str): value = value.strip().upper() if value == "Y": - return Virtual.YES + return Virtual.YES if value == "N": - return Virtual.NO + return Virtual.NO msg = f"Invalid value for Virtual: {value!r}" raise ValueError(msg) @@ -161,8 +161,8 @@ class IterationRule(BaseModel): @field_validator("rule_stop", mode="before") def parse_yn_to_bool(cls, v: str | bool) -> bool: # noqa: N805, FBT001 if isinstance(v, str): - return v.upper() == "Y" - return v + return v.upper() == "Y" + return v _parent: Iteration | None = PrivateAttr(default=None) @@ -184,7 +184,7 @@ def rule_code(self) -> str: for rule_entry in self._parent.rules_mapper.values(): if rule_entry and self.name in rule_entry.rule_names: rule_code = rule_entry.rule_code - return rule_code or self.code or self.name + return rule_code or self.code or self.name @property def rule_text(self) -> str: @@ -201,7 +201,7 @@ def rule_text(self) -> str: for rule_entry in self._parent.rules_mapper.values(): if rule_entry and self.name in rule_entry.rule_names: rule_text = rule_entry.rule_text - return rule_text or self.description + return rule_text or self.description @cached_property def parsed_cohort_labels(self) -> list[str]: @@ -212,11 +212,11 @@ def parsed_cohort_labels(self) -> list[str]: A list of cohort labels, split by comma. If no label is set, returns an empty list. 
""" if not self.cohort_label: - return [] - return [label.strip() for label in self.cohort_label.split(",") if label.strip()] + return [] + return [label.strip() for label in self.cohort_label.split(",") if label.strip()] def __str__(self) -> str: - return json.dumps(self.model_dump(by_alias=True), indent=2) + return json.dumps(self.model_dump(by_alias=True), indent=2) class AvailableAction(BaseModel): @@ -231,7 +231,7 @@ class AvailableAction(BaseModel): class ActionsMapper(RootModel[dict[str, AvailableAction]]): def get(self, key: str, default: AvailableAction | None = None) -> AvailableAction | None: - return self.root.get(key, default) + return self.root.get(key, default) class StatusText(BaseModel): @@ -252,10 +252,10 @@ class RuleEntry(BaseModel): class RulesMapper(RootModel[dict[str, RuleEntry]]): def get(self, key: str, default: RuleEntry | None = None) -> RuleEntry | None: - return self.root.get(key, default) + return self.root.get(key, default) def values(self) -> list[RuleEntry]: - return list(self.root.values()) + return list(self.root.values()) class Iteration(BaseModel): @@ -289,7 +289,7 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @classmethod def parse_dates(cls, v: str | date) -> date: if isinstance(v, date): - return v + return v v_str = str(v) @@ -298,7 +298,7 @@ def parse_dates(cls, v: str | date) -> date: raise ValueError(msg) try: - return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 + return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 except ValueError as err: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." 
raise ValueError(msg) from err @@ -349,7 +349,7 @@ class CampaignConfig(BaseModel): @classmethod def parse_dates(cls, v: str | date) -> date: if isinstance(v, date): - return v + return v v_str = str(v) @@ -358,7 +358,7 @@ def parse_dates(cls, v: str | date) -> date: raise ValueError(msg) try: - return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 + return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 except ValueError as err: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." raise ValueError(msg) from err @@ -366,14 +366,14 @@ def parse_dates(cls, v: str | date) -> date: @field_serializer("start_date", "end_date", when_used="always") @staticmethod def serialize_dates(v: date, _info: SerializationInfo) -> str: - return v.strftime("%Y%m%d") + return v.strftime("%Y%m%d") @model_validator(mode="after") def check_start_and_end_dates_sensible(self) -> typing.Self: if self.start_date > self.end_date: message = f"start date {self.start_date} after end date {self.end_date}" raise ValueError(message) - return self + return self @model_validator(mode="after") def check_no_overlapping_iterations(self) -> typing.Self: @@ -382,21 +382,21 @@ def check_no_overlapping_iterations(self) -> typing.Self: iteration_date, count = multiple_found message = f"{count} iterations with iteration date {iteration_date} in campaign {self.id}" raise ValueError(message) - return self + return self @cached_property def campaign_live(self) -> bool: today = datetime.now(tz=UTC).date() - return self.start_date <= today <= self.end_date + return self.start_date <= today <= self.end_date @cached_property def current_iteration(self) -> Iteration: today = datetime.now(tz=UTC).date() iterations_by_date_descending = sorted(self.iterations, key=attrgetter("iteration_date"), reverse=True) - return next(i for i in iterations_by_date_descending if i.iteration_date <= today) + return next(i for i in iterations_by_date_descending if i.iteration_date <= 
today) def __str__(self) -> str: - return json.dumps(self.model_dump(by_alias=True), indent=2) + return json.dumps(self.model_dump(by_alias=True), indent=2) class Rules(BaseModel): From a02f622f3443fea8bc2ea46ae8bc9c6f958f4e9c Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 17:17:13 +0000 Subject: [PATCH 26/66] ELI-674 |wip - updated iteration_datetime property --- .../model/campaign_config.py | 36 ++++++++++----- .../services/processors/campaign_evaluator.py | 10 +++-- .../test_campaign_config_validator.py | 19 -------- .../validation/test_iteration_validator.py | 44 ++++++++++++++++++- 4 files changed, 75 insertions(+), 34 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 30cd16054..0e75916e1 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -263,7 +263,7 @@ class Iteration(BaseModel): version: IterationVersion = Field(..., alias="Version") name: IterationName = Field(..., alias="Name") iteration_date: IterationDate = Field(..., alias="IterationDate") - iteration_time: IterationTime = Field(..., alias="IterationTime") + iteration_time: time | None = Field(default=None, alias="IterationTime") iteration_number: int | None = Field(None, alias="IterationNumber") approval_minimum: int | None = Field(None, alias="ApprovalMinimum") approval_maximum: int | None = Field(None, alias="ApprovalMaximum") @@ -308,22 +308,32 @@ def parse_dates(cls, v: str | date) -> date: def serialize_dates(v: date, _info: SerializationInfo) -> str: return v.strftime("%Y%m%d") - @property - def get_iteration_datetime(self) -> datetime: - iteration_time = ( - self.iteration_time - or getattr(getattr(self, "parent", None), "default_iteration_time", None) - ) + @field_serializer("iteration_time", when_used="always") + @staticmethod + def 
serialize_time(v: date, _info: SerializationInfo) -> str | None: + return v.strftime("%H:%M:%S") if v else None - if iteration_time is None: - raise ValueError("No iteration_time available on object or parent.default_iteration_time.") + _parent: CampaignConfig | None = PrivateAttr(default=None) - return datetime.combine(self.iteration_date, iteration_time) + def set_parent(self, parent: CampaignConfig) -> None: + self._parent = parent + @cached_property + def iteration_datetime(self) -> datetime: + if self.iteration_time: + iteration_time = self.iteration_time + elif self._parent: + iteration_time = self._parent.default_iteration_time + else: + msg = f"No iteration_time and no parent linked for iteration {self.id}" + raise ValueError(msg) + + return datetime.combine(self.iteration_date, iteration_time) def __str__(self) -> str: return json.dumps(self.model_dump(by_alias=True), indent=2) + class CampaignConfig(BaseModel): id: CampaignID = Field(..., alias="ID") version: CampaignVersion = Field(..., alias="Version") @@ -345,6 +355,12 @@ class CampaignConfig(BaseModel): model_config = {"populate_by_name": True, "arbitrary_types_allowed": True, "extra": "ignore"} + def __init__(self, **data: dict[str, typing.Any]) -> None: + super().__init__(**data) + # Ensure each rule knows its parent iteration + for iteration in self.iterations: + iteration.set_parent(self) + @field_validator("start_date", "end_date", mode="before") @classmethod def parse_dates(cls, v: str | date) -> date: diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 9d1a9aad1..a63f44c14 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -31,7 +31,7 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf for cc in active_campaigns: try: - 
valid_items.append((cc.current_iteration.iteration_date, cc)) + valid_items.append((cc.current_iteration.iteration_datetime, cc)) except StopIteration: logger.info( "Skipping campaign ID %s as no active iteration was found.", @@ -41,13 +41,15 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf if not valid_items: latest_campaign = None else: - max_date = max(item[0] for item in valid_items) - cc_with_max_iteration_date: list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] + max_date_time = max(item[0] for item in valid_items) + cc_with_max_iteration_date: list[CampaignConfig] = [ + item[1] for item in valid_items if item[0] == max_date_time + ] if len(cc_with_max_iteration_date) > 1: err_msg = ( f"Ambiguous result: '{len(cc_with_max_iteration_date)}' active iterations " f"for target {cc_with_max_iteration_date[0].target} " - f"found for date '{max_date}' " + f"found for date '{max_date_time}' " f"across campaign(s) {[cc.id for cc in cc_with_max_iteration_date]}" ) raise ValueError(err_msg) diff --git a/tests/unit/validation/test_campaign_config_validator.py b/tests/unit/validation/test_campaign_config_validator.py index 0b5c895d7..216369a98 100644 --- a/tests/unit/validation/test_campaign_config_validator.py +++ b/tests/unit/validation/test_campaign_config_validator.py @@ -318,22 +318,3 @@ def test_approval_minimum_greater_than_approval_maximum_is_invalid( data["ApprovalMinimum"] = approval_min data["ApprovalMaximum"] = approval_max CampaignConfigValidation(**data) - - def test_iteration_time_overrides_default_iteration_time( - self, - valid_iteration_config_with_only_mandatory_fields, - ): - # Arrange - data = valid_iteration_config_with_only_mandatory_fields.copy() - data["default_iteration_time"] = "09:00:00" - data["iteration_time"] = "14:30:00" - config = CampaignConfigValidation(**data) - - # Act - result = config.get_iteration_datetime - - # Assert - assert result.time() == IterationTime(14, 30), ( - 
"Expected iteration_time to take precedence over default_iteration_time" - ) - diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py index 3c58c33c9..747c57bd5 100644 --- a/tests/unit/validation/test_iteration_validator.py +++ b/tests/unit/validation/test_iteration_validator.py @@ -1,10 +1,11 @@ from collections import Counter -from datetime import UTC, datetime +from datetime import UTC, datetime, time from typing import ClassVar import pytest from pydantic import ValidationError +from rules_validation_api.validators.campaign_config_validator import CampaignConfigValidation from rules_validation_api.validators.iteration_validator import IterationValidation @@ -504,3 +505,44 @@ def test_invalid_iteration_collects_errors_if_iteration_rules_have_invalid_data( # Assert messages contain the expected text assert "AttributeName must be set" in errors[0]["msg"] assert "AttributeName must be set" in errors[1]["msg"] + + @pytest.mark.parametrize( + ("iteration_time_input", "default_time_iteration_input", "expected_time"), + [ + # Case 1: Iteration time overrides default + ("14:30:00", "09:00:00", time(14, 30, 0)), + # Case 2: Iteration time is missing, so it uses default_iteration_time + (None, "09:00:00", time(9, 0, 0)), + # Case 3: Both are the same + ("10:00:00", "10:00:00", time(10, 0, 0)), + # Case 4: Both are None, falls back to default value (12 AM) in default_iteration_time + (None, None, time(0, 0, 0)), + ], + ) + def test_iteration_get_iteration_time_property( + self, + valid_campaign_config_with_only_mandatory_fields, + valid_iteration_with_only_mandatory_fields, + iteration_time_input, + default_time_iteration_input, + expected_time, + ): + iteration_data = valid_iteration_with_only_mandatory_fields.copy() + iteration_data["IterationTime"] = iteration_time_input + iteration_data["IterationDate"] = "20250102" # matching campaign start_date + + data = 
valid_campaign_config_with_only_mandatory_fields.copy() + + if default_time_iteration_input: + data["default_iteration_time"] = default_time_iteration_input + + data["Iterations"] = [iteration_data] + + config = CampaignConfigValidation(**data) + + result = config.iterations[0].iteration_datetime + + assert result.time() == expected_time, ( + f"Failed! Input: {iteration_time_input}, Default: {default_time_iteration_input}. " + f"Expected {expected_time} but got {result.time()}" + ) From c50c03793a51a8279d8462628ba5c9bca20925e2 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Thu, 5 Mar 2026 10:03:24 +0000 Subject: [PATCH 27/66] ELI-674 - test_iteration_full_datetime_validation checks for datetime --- .../validation/test_iteration_validator.py | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py index 747c57bd5..bcc841c58 100644 --- a/tests/unit/validation/test_iteration_validator.py +++ b/tests/unit/validation/test_iteration_validator.py @@ -1,5 +1,5 @@ from collections import Counter -from datetime import UTC, datetime, time +from datetime import UTC, datetime from typing import ClassVar import pytest @@ -507,29 +507,30 @@ def test_invalid_iteration_collects_errors_if_iteration_rules_have_invalid_data( assert "AttributeName must be set" in errors[1]["msg"] @pytest.mark.parametrize( - ("iteration_time_input", "default_time_iteration_input", "expected_time"), + ("iteration_time_input", "default_time_iteration_input", "expected_date_time"), [ # Case 1: Iteration time overrides default - ("14:30:00", "09:00:00", time(14, 30, 0)), + ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0)), # Case 2: Iteration time is missing, so it uses default_iteration_time - (None, "09:00:00", time(9, 0, 0)), + (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0)), # Case 3: Both are the same - 
("10:00:00", "10:00:00", time(10, 0, 0)), + ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0)), # Case 4: Both are None, falls back to default value (12 AM) in default_iteration_time - (None, None, time(0, 0, 0)), + (None, None, datetime(2025, 1, 2, 0, 0, 0)), ], ) - def test_iteration_get_iteration_time_property( + def test_iteration_full_datetime_validation( self, valid_campaign_config_with_only_mandatory_fields, valid_iteration_with_only_mandatory_fields, iteration_time_input, default_time_iteration_input, - expected_time, + expected_date_time, ): + # Given iteration_data = valid_iteration_with_only_mandatory_fields.copy() iteration_data["IterationTime"] = iteration_time_input - iteration_data["IterationDate"] = "20250102" # matching campaign start_date + iteration_data["IterationDate"] = "20250102" # between campaign start_date and end_date data = valid_campaign_config_with_only_mandatory_fields.copy() @@ -538,11 +539,13 @@ def test_iteration_get_iteration_time_property( data["Iterations"] = [iteration_data] + # When config = CampaignConfigValidation(**data) + # Then result = config.iterations[0].iteration_datetime - assert result.time() == expected_time, ( + assert result == expected_date_time, ( f"Failed! Input: {iteration_time_input}, Default: {default_time_iteration_input}. 
" - f"Expected {expected_time} but got {result.time()}" + f"Expected {expected_date_time} but got {result}" ) From 046faf44bbe78240e213f54dce98b0b2ac4625e6 Mon Sep 17 00:00:00 2001 From: Oneeb <258801025+oneeb-nhs@users.noreply.github.com> Date: Thu, 5 Mar 2026 12:08:29 +0000 Subject: [PATCH 28/66] updated iteration time --- .../model/campaign_config.py | 51 ++++++++++++++++++- .../validation/test_iteration_validator.py | 8 +-- 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 0e75916e1..6c2bfb876 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -263,7 +263,7 @@ class Iteration(BaseModel): version: IterationVersion = Field(..., alias="Version") name: IterationName = Field(..., alias="Name") iteration_date: IterationDate = Field(..., alias="IterationDate") - iteration_time: time | None = Field(default=None, alias="IterationTime") + iteration_time: IterationTime | None = Field(default=None, alias="IterationTime") iteration_number: int | None = Field(None, alias="IterationNumber") approval_minimum: int | None = Field(None, alias="ApprovalMinimum") approval_maximum: int | None = Field(None, alias="ApprovalMaximum") @@ -303,6 +303,27 @@ def parse_dates(cls, v: str | date) -> date: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." raise ValueError(msg) from err + @field_validator("iteration_time", mode="before") + @classmethod + def parse_times(cls, v: str | time) -> time | None: + if not v: + return None + if isinstance(v, time): + return v + + v_str = str(v).strip() + + if re.fullmatch(r"^\d{2}:\d{2}:\d{2}$", v_str): + try: + return datetime.strptime(v_str, "%H:%M:%S").time() # noqa: DTZ007 + except ValueError as err: + msg = f"Invalid time value: {v_str}. Must be a valid time in HH:MM:SS." 
+ raise ValueError(msg) from err + + # If none matched, raise a format error + msg = f"Invalid format: {v_str}. Must be HH:MM:SS." + raise ValueError(msg) + @field_serializer("iteration_date", when_used="always") @staticmethod def serialize_dates(v: date, _info: SerializationInfo) -> str: @@ -310,7 +331,7 @@ def serialize_dates(v: date, _info: SerializationInfo) -> str: @field_serializer("iteration_time", when_used="always") @staticmethod - def serialize_time(v: date, _info: SerializationInfo) -> str | None: + def serialize_time(v: time, _info: SerializationInfo) -> str | None: return v.strftime("%H:%M:%S") if v else None _parent: CampaignConfig | None = PrivateAttr(default=None) @@ -379,11 +400,37 @@ def parse_dates(cls, v: str | date) -> date: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." raise ValueError(msg) from err + @field_validator("default_iteration_time", mode="before") + @classmethod + def parse_times(cls, v: str | time) -> time | None: + if not v: + return None + if isinstance(v, time): + return v + + v_str = str(v).strip() + + if re.fullmatch(r"^\d{2}:\d{2}:\d{2}$", v_str): + try: + return datetime.strptime(v_str, "%H:%M:%S").time() # noqa: DTZ007 + except ValueError as err: + msg = f"Invalid time value: {v_str}. Must be a valid time in HH:MM:SS." + raise ValueError(msg) from err + + # If none matched, raise a format error + msg = f"Invalid format: {v_str}. Must be HH:MM:SS." 
+ raise ValueError(msg) + @field_serializer("start_date", "end_date", when_used="always") @staticmethod def serialize_dates(v: date, _info: SerializationInfo) -> str: return v.strftime("%Y%m%d") + @field_serializer("default_iteration_time", when_used="always") + @staticmethod + def serialize_time(v: time, _info: SerializationInfo) -> str | None: + return v.strftime("%H:%M:%S") if v else None + @model_validator(mode="after") def check_start_and_end_dates_sensible(self) -> typing.Self: if self.start_date > self.end_date: diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py index bcc841c58..d258d479a 100644 --- a/tests/unit/validation/test_iteration_validator.py +++ b/tests/unit/validation/test_iteration_validator.py @@ -510,13 +510,13 @@ def test_invalid_iteration_collects_errors_if_iteration_rules_have_invalid_data( ("iteration_time_input", "default_time_iteration_input", "expected_date_time"), [ # Case 1: Iteration time overrides default - ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0)), + ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0, tzinfo=UTC)), # Case 2: Iteration time is missing, so it uses default_iteration_time - (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0)), + (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0, tzinfo=UTC)), # Case 3: Both are the same - ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0)), + ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0, tzinfo=UTC)), # Case 4: Both are None, falls back to default value (12 AM) in default_iteration_time - (None, None, datetime(2025, 1, 2, 0, 0, 0)), + (None, None, datetime(2025, 1, 2, 0, 0, 0, tzinfo=UTC)), ], ) def test_iteration_full_datetime_validation( From 905a5d83d94cba24afa027a7ddc712a093119287 Mon Sep 17 00:00:00 2001 From: Oneeb <258801025+oneeb-nhs@users.noreply.github.com> Date: Thu, 5 Mar 2026 12:40:39 +0000 Subject: [PATCH 29/66] updated test cases for iteration_time --- 
.../model/campaign_config.py | 2 +- .../test_campaign_config_validator.py | 12 ++++++++---- .../unit/validation/test_iteration_validator.py | 17 +++++++++++++---- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 6c2bfb876..c9847ff6d 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -366,7 +366,7 @@ class CampaignConfig(BaseModel): reviewer: list[str] | None = Field(None, alias="Reviewer") iteration_frequency: Literal["X", "D", "W", "M", "Q", "A"] = Field(..., alias="IterationFrequency") iteration_type: Literal["A", "M", "S", "O"] = Field(..., alias="IterationType") - default_iteration_time: IterationTime = Field(default=IterationTime(time(0, 0, 0)), alias="IterationTime") + default_iteration_time: IterationTime = Field(default=IterationTime(time(0, 0, 0)), alias="DefaultIterationTime") default_comms_routing: str | None = Field(None, alias="DefaultCommsRouting") start_date: StartDate = Field(..., alias="StartDate") end_date: EndDate = Field(..., alias="EndDate") diff --git a/tests/unit/validation/test_campaign_config_validator.py b/tests/unit/validation/test_campaign_config_validator.py index 216369a98..49903011f 100644 --- a/tests/unit/validation/test_campaign_config_validator.py +++ b/tests/unit/validation/test_campaign_config_validator.py @@ -1,3 +1,5 @@ +from datetime import datetime + import pytest from pydantic import ValidationError @@ -165,11 +167,13 @@ def test_reviewer_field(self, reviewer, valid_campaign_config_with_only_mandator model = CampaignConfigValidation(**data) assert model.reviewer == reviewer - @pytest.mark.parametrize("iteration_time", ["14:00", "09:30", "18:45"]) - def test_iteration_time_field(self, iteration_time, valid_campaign_config_with_only_mandatory_fields): - data = 
{**valid_campaign_config_with_only_mandatory_fields, "IterationTime": iteration_time} + @pytest.mark.parametrize("default_iteration_time", ["14:00:00", "09:30:00", "18:45:00"]) + def test_default_iteration_time_field( + self, default_iteration_time, valid_campaign_config_with_only_mandatory_fields + ): + data = {**valid_campaign_config_with_only_mandatory_fields, "DefaultIterationTime": default_iteration_time} model = CampaignConfigValidation(**data) - assert model.iteration_time == iteration_time + assert model.default_iteration_time == datetime.strptime(default_iteration_time, "%H:%M:%S").time() # noqa: DTZ007 @pytest.mark.parametrize("routing", ["email", "sms", "push"]) def test_default_comms_routing_field(self, routing, valid_campaign_config_with_only_mandatory_fields): diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py index d258d479a..737ed219e 100644 --- a/tests/unit/validation/test_iteration_validator.py +++ b/tests/unit/validation/test_iteration_validator.py @@ -182,6 +182,15 @@ def test_approval_maximum(self, approval_maximum, valid_campaign_config_with_onl model = IterationValidation(**data) assert model.approval_maximum == approval_maximum + @pytest.mark.parametrize("iteration_time", ["14:00:00", "09:30:00", "18:45:00"]) + def test_iteration_time_field(self, iteration_time, valid_campaign_config_with_only_mandatory_fields): + data = { + **valid_campaign_config_with_only_mandatory_fields["Iterations"][0], + "IterationTime": iteration_time, + } + model = IterationValidation(**data) + assert model.iteration_time == datetime.strptime(iteration_time, "%H:%M:%S").time() # noqa: DTZ007 + class TestBUCValidations: book_local_1_action: ClassVar[dict] = { @@ -510,13 +519,13 @@ def test_invalid_iteration_collects_errors_if_iteration_rules_have_invalid_data( ("iteration_time_input", "default_time_iteration_input", "expected_date_time"), [ # Case 1: Iteration time overrides default - ("14:30:00", 
"09:00:00", datetime(2025, 1, 2, 14, 30, 0, tzinfo=UTC)), + ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0)), # noqa: DTZ001 # Case 2: Iteration time is missing, so it uses default_iteration_time - (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0, tzinfo=UTC)), + (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0)), # noqa: DTZ001 # Case 3: Both are the same - ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0, tzinfo=UTC)), + ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0)), # noqa: DTZ001 # Case 4: Both are None, falls back to default value (12 AM) in default_iteration_time - (None, None, datetime(2025, 1, 2, 0, 0, 0, tzinfo=UTC)), + (None, None, datetime(2025, 1, 2, 0, 0, 0)), # noqa: DTZ001 ], ) def test_iteration_full_datetime_validation( From 41be3efd147882693bef31a0204dc37700c8e94f Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Thu, 5 Mar 2026 15:40:01 +0000 Subject: [PATCH 30/66] ELI-674 - fixed flaky test, which was due to iteration/campaign factory --- .../services/processors/campaign_evaluator.py | 2 +- tests/fixtures/builders/model/rule.py | 5 ++++- tests/integration/in_process/test_eligibility_endpoint.py | 3 +-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index a63f44c14..248c1c967 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -49,7 +49,7 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf err_msg = ( f"Ambiguous result: '{len(cc_with_max_iteration_date)}' active iterations " f"for target {cc_with_max_iteration_date[0].target} " - f"found for date '{max_date_time}' " + f"found for datetime '{max_date_time}' " f"across campaign(s) {[cc.id for cc in 
cc_with_max_iteration_date]}" ) raise ValueError(err_msg) diff --git a/tests/fixtures/builders/model/rule.py b/tests/fixtures/builders/model/rule.py index 2793ea032..1313cbb77 100644 --- a/tests/fixtures/builders/model/rule.py +++ b/tests/fixtures/builders/model/rule.py @@ -1,4 +1,4 @@ -from datetime import UTC, date, datetime, timedelta +from datetime import UTC, date, datetime, time, timedelta from operator import attrgetter from random import randint @@ -16,6 +16,7 @@ Iteration, IterationCohort, IterationRule, + IterationTime, RuleAttributeLevel, RuleAttributeName, RuleComparator, @@ -89,6 +90,7 @@ class IterationFactory(ModelFactory[Iteration]): default_comms_routing = "defaultcomms" actions_mapper = Use(ActionsMapperFactory.build) rules_mapper = None + iteration_time = None class RawCampaignConfigFactory(ModelFactory[CampaignConfig]): @@ -96,6 +98,7 @@ class RawCampaignConfigFactory(ModelFactory[CampaignConfig]): id = "42-hi5tch-hi5kers-gu5ide-t2o-t3he-gal6axy" start_date = Use(past_date) end_date = Use(future_date) + default_iteration_time = IterationTime(time(0, 0, 0)) class CampaignConfigFactory(RawCampaignConfigFactory): diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index bc6fe2693..3b6e5c0cb 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1532,11 +1532,10 @@ def test_if_multiple_active_iterations_with_same_iteration_datetime_for_the_same ) ), ) - err_msg = ( "Ambiguous result: '2' active iterations " "for target RSV " - f"found for date '{previous_day}' " + f"found for datetime '{previous_day} 00:00:00' " "across campaign(s) ['RSV_campaign_id_1', 'RSV_campaign_id_2']" ) assert any(err_msg in message for message in caplog.messages), ( From 82e1171f8f4f0efa3172a40b0ac4626ec4aebedf Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: 
Fri, 6 Mar 2026 09:18:49 +0000 Subject: [PATCH 31/66] ELI-674 - tweaks to validator --- src/rules_validation_api/app.py | 46 ++- .../validators/campaign_config_validator.py | 9 + .../in_process/test_eligibility_endpoint.py | 6 +- .../test_config/test_config_v1.3.0.json | 372 ++++++++++++++++++ 4 files changed, 425 insertions(+), 8 deletions(-) create mode 100644 tests/test_data/test_config/test_config_v1.3.0.json diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index 43aa6530f..a8fb83587 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -3,6 +3,8 @@ import logging import sys from collections import defaultdict +from datetime import UTC, datetime +from operator import attrgetter from pathlib import Path from pydantic import ValidationError @@ -75,17 +77,51 @@ def main() -> None: # pragma: no cover def display_current_iteration(result: RulesValidation) -> None: no_of_iterations = 0 + + # Current Iteration try: no_of_iterations = len(result.campaign_config.iterations) current = result.campaign_config.current_iteration except StopIteration: current = None - if current is None: - sys.stdout.write(f"{YELLOW}No active iteration could be determined{RESET}\n") - sys.stdout.write(f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n") + is_campaign_live = result.campaign_config.campaign_live + if is_campaign_live: + sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}LIVE{RESET}\n") + if current is None: + sys.stdout.write(f"{YELLOW}No active iteration could be determined{RESET}\n") + else: + sys.stdout.write( + f"{YELLOW}Current active Iteration Number: {RESET}{GREEN}{current.iteration_number}{RESET}\n" + ) + sys.stdout.write( + f"{YELLOW}Current active Iteration's date&time: {RESET}{GREEN}{current.iteration_datetime}{RESET}\n" + ) + + # Next Iteration + try: + sorted_iterations = sorted(result.campaign_config.iterations, key=attrgetter("iteration_date")) + today = 
datetime.now(tz=UTC).date() + next_iteration = ( + next(i for i in sorted_iterations if i.iteration_date > today) if sorted_iterations else None + ) + except StopIteration: + next_iteration = None + + if next_iteration: + sys.stdout.write( + f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" + ) + sys.stdout.write( + f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" + ) + else: + sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") + else: - sys.stdout.write(f"{YELLOW}Current Iteration Number: {RESET}{GREEN}{current.iteration_number}{RESET}\n") - sys.stdout.write(f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n") + sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}NOT LIVE{RESET}\n") + + # Total no of iterations + sys.stdout.write(f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n") if __name__ == "__main__": # pragma: no cover diff --git a/src/rules_validation_api/validators/campaign_config_validator.py b/src/rules_validation_api/validators/campaign_config_validator.py index b30e77a34..8cf05faf1 100644 --- a/src/rules_validation_api/validators/campaign_config_validator.py +++ b/src/rules_validation_api/validators/campaign_config_validator.py @@ -34,6 +34,15 @@ def validate_iterations_have_unique_id(self) -> typing.Self: raise ValueError(msg) return self + @model_validator(mode="after") + def validate_iterations_have_unique_number(self) -> typing.Self: + numbers = [iteration.iteration_number for iteration in self.iterations] + duplicates = {i_id for i_id, count in Counter(numbers).items() if count > 1} + if duplicates: + msg = f"Iterations contain duplicate numbers: {', '.join(str(i) for i in duplicates)}" + raise ValueError(msg) + return self + @model_validator(mode="after") def validate_campaign_has_iteration_within_schedule(self) -> typing.Self: errors: list[str] = [] 
diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 3b6e5c0cb..2afca8543 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -33,15 +33,15 @@ from tests.integration.conftest import UNIQUE_CONSUMER_HEADER -def today(): +def today() -> date: return datetime.now(UTC).date() -def yesterday(): +def yesterday() -> date: return datetime.now(UTC).date() - timedelta(days=1) -def tomorrow(): +def tomorrow() -> date: return datetime.now(UTC).date() + timedelta(days=1) diff --git a/tests/test_data/test_config/test_config_v1.3.0.json b/tests/test_data/test_config/test_config_v1.3.0.json new file mode 100644 index 000000000..66d4fec40 --- /dev/null +++ b/tests/test_data/test_config/test_config_v1.3.0.json @@ -0,0 +1,372 @@ +{ + "CampaignConfig": { + "ID": "id_100", + "Version": "1", + "Name": "Test Config", + "Type": "V", + "Target": "RSV", + "Manager": [ + "person@test.com" + ], + "Approver": [ + "person@test.com" + ], + "Reviewer": [ + "person@test.com" + ], + "StartDate": "20250101", + "EndDate": "20270104", + "ApprovalMinimum": 1, + "ApprovalMaximum": 5000000, + "IterationFrequency": "X", + "IterationType": "M", + "DefaultIterationTime": "07:00:00", + "Iterations": [ + { + "ID": "id_100", + "Version": "1", + "Name": "Test Config", + "Type": "M", + "IterationDate": "20270103", + "IterationTime": "07:00:00", + "IterationNumber": 1, + "CommsType": "R", + "ApprovalMinimum": 1, + "ApprovalMaximum": 5000000, + "DefaultCommsRouting": "INTERNALCONTACTGP1", + "DefaultNotActionableRouting": "INTERNALCONTACTGP1", + "DefaultNotEligibleRouting": "INTERNALCONTACTGP1", + "StatusText": { + "NotEligible": "You are not eligible to take RSV vaccines", + "NotActionable": "You have taken RSV vaccine in the last 90 days", + "Actionable": "You can take RSV vaccine" + }, + "ActionsMapper": { + "INTERNALCONTACTGP1": { + 
"ExternalRoutingCode": "CONTACTGP", + "ActionDescription": "Contact GP Text1 description", + "ActionType": "text1" + }, + "INTERNALCONTACTGP2": { + "ExternalRoutingCode": "CONTACTGP", + "ActionDescription": "Contact GP Link description", + "ActionType": "link", + "UrlLink": "https://www.link123.example", + "UrlLabel": "link label" + }, + "INTERNALTESCO": { + "ExternalRoutingCode": "TESCO", + "ActionDescription": "Tesco description", + "ActionType": "link", + "UrlLink": "https://www.tesco_link.example", + "UrlLabel": "link label" + }, + "INTERNALFINDWALKIN": { + "ExternalRoutingCode": "FINDWALKIN", + "ActionDescription": "Find walkin description", + "ActionType": "button" + }, + "XRULEID1": { + "ExternalRoutingCode": "FINDWALKIN", + "ActionDescription": "Find walkin description", + "ActionType": "button" + }, + "YRULEID1": { + "ExternalRoutingCode": "FINDWALKIN", + "ActionDescription": "Find walkin description", + "ActionType": "button" + } + }, + "IterationCohorts": [ + { + "CohortLabel": "rsv_75_rolling", + "CohortGroup": "rsv_age_range", + "PositiveDescription": "You are currently aged 75 to 79", + "NegativeDescription": "You are not currently aged 75 to 79", + "Priority": 0, + "Virtual": "N" + }, + { + "CohortLabel": "rsv_75to79_2024", + "CohortGroup": "rsv_catch_up_age_range", + "PositiveDescription": "You turned 80 after 1 September 2024, so are eligible for the RSV vaccine until 31 August 2025", + "NegativeDescription": "You did not turn 80 after 1 September 2024 and get vaccinated by 31 August 2025", + "Priority": 10 + }, + { + "CohortLabel": "virtual_rsv_80_since_02_Sept_2024", + "CohortGroup": "rsv_age_catchup", + "PositiveDescription": "You turned 80 after 1 September 2024, so are eligible for the RSV vaccine until 31 August 2025", + "NegativeDescription": "You did not turn 80 after 1 September 2024 and get vaccinated by 31 August 2025", + "Priority": 101, + "Virtual": "Y" + } + ], + "IterationRules": [ + { + "Type": "F", + "Name": "Test Rule", + 
"Description": "Test Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": "=", + "Comparator": "19000101" + }, + { + "Type": "F", + "Name": "Test Rule", + "Description": "Test Rule Desc", + "Priority": 30, + "AttributeLevel": "PERSON", + "AttributeName": "place of birth", + "Operator": "=", + "Comparator": "london" + }, + { + "Type": "S", + "Name": "Already Vaccinated", + "Description": "Already Vaccinated|You have already been Vaccinated", + "Priority": 30, + "AttributeLevel": "TARGET", + "AttributeTarget": "RSV", + "AttributeName": "LAST_SUCCESSFUL_DATE", + "CohortLabel": "rsv_75to79_2024", + "Operator": "is_not_null", + "Comparator": "" + }, + { + "Type": "S", + "Name": "In Supressed Cohort", + "Description": "In Supressed Cohort|You Are In Supressed Cohort", + "Priority": 40, + "AttributeLevel": "COHORT", + "AttributeName": "COHORT_LABEL", + "Operator": "=", + "Comparator": "rsv_75to79_2024" + }, + { + "Type": "R", + "Name": "Test Redirect Rule", + "Description": "Test Redirect Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "INTERNALCONTACTGP1|INTERNALTESCO" + }, + { + "Type": "X", + "Name": "Test X Rule for not eligible", + "Description": "Test X Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "XRULEID1|INTERNALTESCO" + }, + { + "Type": "Y", + "Name": "Test Y Rule for not actionable", + "Description": "Test Y Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "YRULEID1|INTERNALTESCO" + } + ], + "RulesMapper": { + "ALREADY_JABBED": { + "RuleNames": [ + "Already Vaccinated" + ], + "RuleCode": "Already Jabbed", + "RuleText": "Already Vaccinated|You have already been 
Vaccinated" + }, + "OTHER_SETTINGS": { + "RuleNames": [ + "In Supressed Cohort" + ], + "RuleCode": "Present in Supressed Cohort" + } + } + }, + { + "ID": "id_101", + "Version": "1", + "Name": "Test Config", + "Type": "M", + "IterationDate": "20260104", + "IterationNumber": 2, + "CommsType": "R", + "ApprovalMinimum": 1, + "ApprovalMaximum": 5000000, + "DefaultCommsRouting": "INTERNALCONTACTGP1", + "DefaultNotActionableRouting": "INTERNALCONTACTGP1", + "DefaultNotEligibleRouting": "INTERNALCONTACTGP1", + "StatusText": { + "NotEligible": "You are not eligible to take RSV vaccines", + "NotActionable": "You have taken RSV vaccine in the last 90 days", + "Actionable": "You can take RSV vaccine" + }, + "ActionsMapper": { + "INTERNALCONTACTGP1": { + "ExternalRoutingCode": "CONTACTGP", + "ActionDescription": "Contact GP Text1 description", + "ActionType": "text1" + }, + "INTERNALCONTACTGP2": { + "ExternalRoutingCode": "CONTACTGP", + "ActionDescription": "Contact GP Link description", + "ActionType": "link", + "UrlLink": "https://www.link123.example", + "UrlLabel": "link label" + }, + "INTERNALTESCO": { + "ExternalRoutingCode": "TESCO", + "ActionDescription": "Tesco description", + "ActionType": "link", + "UrlLink": "https://www.tesco_link.example", + "UrlLabel": "link label" + }, + "INTERNALFINDWALKIN": { + "ExternalRoutingCode": "FINDWALKIN", + "ActionDescription": "Find walkin description", + "ActionType": "button" + }, + "XRULEID1": { + "ExternalRoutingCode": "FINDWALKIN", + "ActionDescription": "Find walkin description", + "ActionType": "button" + }, + "YRULEID1": { + "ExternalRoutingCode": "FINDWALKIN", + "ActionDescription": "Find walkin description", + "ActionType": "button" + } + }, + "IterationCohorts": [ + { + "CohortLabel": "rsv_75_rolling", + "CohortGroup": "rsv_age_range", + "PositiveDescription": "You are currently aged 75 to 79", + "NegativeDescription": "You are not currently aged 75 to 79", + "Priority": 0, + "Virtual": "N" + }, + { + "CohortLabel": 
"rsv_75to79_2024", + "CohortGroup": "rsv_catch_up_age_range", + "PositiveDescription": "You turned 80 after 1 September 2024, so are eligible for the RSV vaccine until 31 August 2025", + "NegativeDescription": "You did not turn 80 after 1 September 2024 and get vaccinated by 31 August 2025", + "Priority": 10 + }, + { + "CohortLabel": "virtual_rsv_80_since_02_Sept_2024", + "CohortGroup": "rsv_age_catchup", + "PositiveDescription": "You turned 80 after 1 September 2024, so are eligible for the RSV vaccine until 31 August 2025", + "NegativeDescription": "You did not turn 80 after 1 September 2024 and get vaccinated by 31 August 2025", + "Priority": 101, + "Virtual": "Y" + } + ], + "IterationRules": [ + { + "Type": "F", + "Name": "Test Rule", + "Description": "Test Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": "=", + "Comparator": "19000101" + }, + { + "Type": "F", + "Name": "Test Rule", + "Description": "Test Rule Desc", + "Priority": 30, + "AttributeLevel": "PERSON", + "AttributeName": "place of birth", + "Operator": "=", + "Comparator": "london" + }, + { + "Type": "S", + "Name": "Already Vaccinated", + "Description": "Already Vaccinated|You have already been Vaccinated", + "Priority": 30, + "AttributeLevel": "TARGET", + "AttributeTarget": "RSV", + "AttributeName": "LAST_SUCCESSFUL_DATE", + "CohortLabel": "rsv_75to79_2024", + "Operator": "is_not_null", + "Comparator": "" + }, + { + "Type": "S", + "Name": "In Supressed Cohort", + "Description": "In Supressed Cohort|You Are In Supressed Cohort", + "Priority": 40, + "AttributeLevel": "COHORT", + "AttributeName": "COHORT_LABEL", + "Operator": "=", + "Comparator": "rsv_75to79_2024" + }, + { + "Type": "R", + "Name": "Test Redirect Rule", + "Description": "Test Redirect Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": 
"INTERNALCONTACTGP1|INTERNALTESCO" + }, + { + "Type": "X", + "Name": "Test X Rule for not eligible", + "Description": "Test X Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "XRULEID1|INTERNALTESCO" + }, + { + "Type": "Y", + "Name": "Test Y Rule for not actionable", + "Description": "Test Y Rule Desc", + "Priority": 20, + "AttributeLevel": "PERSON", + "AttributeName": "DATE_OF_BIRTH", + "Operator": ">", + "Comparator": "19000101", + "CommsRouting": "YRULEID1|INTERNALTESCO" + } + ], + "RulesMapper": { + "ALREADY_JABBED": { + "RuleNames": [ + "Already Vaccinated" + ], + "RuleCode": "Already Jabbed", + "RuleText": "Already Vaccinated|You have already been Vaccinated" + }, + "OTHER_SETTINGS": { + "RuleNames": [ + "In Supressed Cohort" + ], + "RuleCode": "Present in Supressed Cohort" + } + } + } + ] + } +} From 3dd7db10b4167cdaead2a2cdb57a703405c0ee2f Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 6 Mar 2026 09:45:45 +0000 Subject: [PATCH 32/66] ELI-674 - tweaks to validator --- src/rules_validation_api/app.py | 41 +++++++++++++++++---------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index a8fb83587..51748e6d3 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -97,29 +97,30 @@ def display_current_iteration(result: RulesValidation) -> None: f"{YELLOW}Current active Iteration's date&time: {RESET}{GREEN}{current.iteration_datetime}{RESET}\n" ) - # Next Iteration - try: - sorted_iterations = sorted(result.campaign_config.iterations, key=attrgetter("iteration_date")) - today = datetime.now(tz=UTC).date() - next_iteration = ( - next(i for i in sorted_iterations if i.iteration_date > today) if sorted_iterations else None - ) - except StopIteration: - next_iteration = None - - if 
next_iteration: - sys.stdout.write( - f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" - ) - sys.stdout.write( - f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" - ) - else: - sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") - else: sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}NOT LIVE{RESET}\n") + # Next Iteration + try: + sorted_iterations = sorted(result.campaign_config.iterations, key=attrgetter("iteration_date")) + today = datetime.now(tz=UTC).date() + next_iteration = ( + next(i for i in sorted_iterations if i.iteration_date > today) if sorted_iterations else None + ) + except StopIteration: + next_iteration = None + + if next_iteration: + sys.stdout.write( + f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" + ) + sys.stdout.write( + f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" + ) + else: + sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") + + # Total no of iterations sys.stdout.write(f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n") From f83f492a6bdaee3465bdbb0eebc6fa5b165f6896 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:19:44 +0000 Subject: [PATCH 33/66] ELI-674 - tweaks to validator --- tests/test_data/test_config/test_config_v1.3.0.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_data/test_config/test_config_v1.3.0.json b/tests/test_data/test_config/test_config_v1.3.0.json index 66d4fec40..c6f7eb428 100644 --- a/tests/test_data/test_config/test_config_v1.3.0.json +++ b/tests/test_data/test_config/test_config_v1.3.0.json @@ -14,7 +14,7 @@ "Reviewer": [ "person@test.com" ], - "StartDate": "20250101", + "StartDate": "20270101", 
"EndDate": "20270104", "ApprovalMinimum": 1, "ApprovalMaximum": 5000000, @@ -200,7 +200,7 @@ "Version": "1", "Name": "Test Config", "Type": "M", - "IterationDate": "20260104", + "IterationDate": "20270102", "IterationNumber": 2, "CommsType": "R", "ApprovalMinimum": 1, From 48fee791e8c6d601b031512607fe35064d52d6fa Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:23:20 +0000 Subject: [PATCH 34/66] ELI-674 - tweaks to validator --- src/rules_validation_api/app.py | 67 +++++++++++++++++---------------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index 51748e6d3..742559cce 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -76,54 +76,57 @@ def main() -> None: # pragma: no cover def display_current_iteration(result: RulesValidation) -> None: - no_of_iterations = 0 + config = result.campaign_config + iterations = config.iterations + is_campaign_live = config.campaign_live + today = datetime.now(tz=UTC).date() - # Current Iteration - try: - no_of_iterations = len(result.campaign_config.iterations) - current = result.campaign_config.current_iteration - except StopIteration: - current = None - is_campaign_live = result.campaign_config.campaign_live + no_of_iterations = len(iterations) + is_campaign_expired = config.end_date < today + + # ---- Current Iteration ---- if is_campaign_live: sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}LIVE{RESET}\n") - if current is None: - sys.stdout.write(f"{YELLOW}No active iteration could be determined{RESET}\n") - else: + current = config.current_iteration + if current: sys.stdout.write( f"{YELLOW}Current active Iteration Number: {RESET}{GREEN}{current.iteration_number}{RESET}\n" ) sys.stdout.write( f"{YELLOW}Current active Iteration's date&time: {RESET}{GREEN}{current.iteration_datetime}{RESET}\n" ) + else: + sys.stdout.write(f"{YELLOW}No 
active iteration could be determined{RESET}\n") else: - sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}NOT LIVE{RESET}\n") + sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}NOT LIVE{RESET}") - # Next Iteration - try: - sorted_iterations = sorted(result.campaign_config.iterations, key=attrgetter("iteration_date")) - today = datetime.now(tz=UTC).date() - next_iteration = ( - next(i for i in sorted_iterations if i.iteration_date > today) if sorted_iterations else None - ) - except StopIteration: - next_iteration = None + if is_campaign_expired: + sys.stdout.write(f"{YELLOW}[EXPIRED on {config.end_date}]{RESET}\n") + else: + sys.stdout.write(f"{YELLOW}[To be STARTED on {RESET}{GREEN}{config.start_date}{RESET}{YELLOW}]{RESET}\n") - if next_iteration: - sys.stdout.write( - f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" + # ---- Next Iteration ---- + if not is_campaign_expired: + sorted_iterations = sorted(iterations, key=attrgetter("iteration_date")) + next_iteration = next( + (i for i in sorted_iterations if i.iteration_date > today), None ) - sys.stdout.write( - f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" - ) - else: - sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") + if next_iteration: + sys.stdout.write( + f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" + ) + sys.stdout.write( + f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" + ) + else: + sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") - # Total no of iterations - sys.stdout.write(f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n") - + # ---- Total Iterations ---- + sys.stdout.write( + f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n" + ) if __name__ == 
"__main__": # pragma: no cover main() From 6d8e8485ead0c1feaa877d3f863f01bc80be991e Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:50:38 +0000 Subject: [PATCH 35/66] ELI-674 - tweaks to validator --- src/rules_validation_api/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index 742559cce..8f3fdb6f9 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -99,7 +99,7 @@ def display_current_iteration(result: RulesValidation) -> None: sys.stdout.write(f"{YELLOW}No active iteration could be determined{RESET}\n") else: - sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}NOT LIVE{RESET}") + sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}NOT LIVE{RESET} ") if is_campaign_expired: sys.stdout.write(f"{YELLOW}[EXPIRED on {config.end_date}]{RESET}\n") From 51abd8932de4caab9476e0956eb6f68532317327 Mon Sep 17 00:00:00 2001 From: Oneeb <258801025+oneeb-nhs@users.noreply.github.com> Date: Fri, 6 Mar 2026 15:34:04 +0000 Subject: [PATCH 36/66] reverted default_iteration_time to iteration_time --- .../model/campaign_config.py | 8 ++-- src/rules_validation_api/app.py | 41 +++++++++++-------- tests/fixtures/builders/model/rule.py | 2 +- .../test_config/test_config_v1.3.0.json | 2 +- tests/unit/validation/test_app.py | 3 ++ .../test_campaign_config_validator.py | 10 ++--- .../validation/test_iteration_validator.py | 6 +-- 7 files changed, 40 insertions(+), 32 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index c9847ff6d..3bad4892b 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -344,7 +344,7 @@ def iteration_datetime(self) -> datetime: if self.iteration_time: iteration_time = self.iteration_time elif 
self._parent: - iteration_time = self._parent.default_iteration_time + iteration_time = self._parent.iteration_time else: msg = f"No iteration_time and no parent linked for iteration {self.id}" raise ValueError(msg) @@ -366,7 +366,7 @@ class CampaignConfig(BaseModel): reviewer: list[str] | None = Field(None, alias="Reviewer") iteration_frequency: Literal["X", "D", "W", "M", "Q", "A"] = Field(..., alias="IterationFrequency") iteration_type: Literal["A", "M", "S", "O"] = Field(..., alias="IterationType") - default_iteration_time: IterationTime = Field(default=IterationTime(time(0, 0, 0)), alias="DefaultIterationTime") + iteration_time: IterationTime = Field(default=IterationTime(time(0, 0, 0)), alias="IterationTime") default_comms_routing: str | None = Field(None, alias="DefaultCommsRouting") start_date: StartDate = Field(..., alias="StartDate") end_date: EndDate = Field(..., alias="EndDate") @@ -400,7 +400,7 @@ def parse_dates(cls, v: str | date) -> date: msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." 
raise ValueError(msg) from err - @field_validator("default_iteration_time", mode="before") + @field_validator("iteration_time", mode="before") @classmethod def parse_times(cls, v: str | time) -> time | None: if not v: @@ -426,7 +426,7 @@ def parse_times(cls, v: str | time) -> time | None: def serialize_dates(v: date, _info: SerializationInfo) -> str: return v.strftime("%Y%m%d") - @field_serializer("default_iteration_time", when_used="always") + @field_serializer("iteration_time", when_used="always") @staticmethod def serialize_time(v: time, _info: SerializationInfo) -> str | None: return v.strftime("%H:%M:%S") if v else None diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index 8f3fdb6f9..e1af12870 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -88,14 +88,17 @@ def display_current_iteration(result: RulesValidation) -> None: if is_campaign_live: sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}LIVE{RESET}\n") current = config.current_iteration - if current: - sys.stdout.write( - f"{YELLOW}Current active Iteration Number: {RESET}{GREEN}{current.iteration_number}{RESET}\n" - ) - sys.stdout.write( - f"{YELLOW}Current active Iteration's date&time: {RESET}{GREEN}{current.iteration_datetime}{RESET}\n" - ) - else: + + try: + current = config.current_iteration + if current: + sys.stdout.write( + f"{YELLOW}Current active Iteration Number: {RESET}{GREEN}{current.iteration_number}{RESET}\n" + ) + sys.stdout.write( + f"{YELLOW}Current active Iteration's date&time: {RESET}{GREEN}{current.iteration_datetime}{RESET}\n" + ) + except StopIteration: sys.stdout.write(f"{YELLOW}No active iteration could be determined{RESET}\n") else: @@ -109,18 +112,20 @@ def display_current_iteration(result: RulesValidation) -> None: # ---- Next Iteration ---- if not is_campaign_expired: sorted_iterations = sorted(iterations, key=attrgetter("iteration_date")) - next_iteration = next( - (i for i in sorted_iterations if 
i.iteration_date > today), None - ) - if next_iteration: - sys.stdout.write( - f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" + try: + next_iteration = next( + (i for i in sorted_iterations if i.iteration_date > today), None ) - sys.stdout.write( - f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" - ) - else: + + if next_iteration: + sys.stdout.write( + f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" + ) + sys.stdout.write( + f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" + ) + except StopIteration: sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") # ---- Total Iterations ---- diff --git a/tests/fixtures/builders/model/rule.py b/tests/fixtures/builders/model/rule.py index 1313cbb77..a7bdf2cbc 100644 --- a/tests/fixtures/builders/model/rule.py +++ b/tests/fixtures/builders/model/rule.py @@ -98,7 +98,7 @@ class RawCampaignConfigFactory(ModelFactory[CampaignConfig]): id = "42-hi5tch-hi5kers-gu5ide-t2o-t3he-gal6axy" start_date = Use(past_date) end_date = Use(future_date) - default_iteration_time = IterationTime(time(0, 0, 0)) + iteration_time = IterationTime(time(0, 0, 0)) class CampaignConfigFactory(RawCampaignConfigFactory): diff --git a/tests/test_data/test_config/test_config_v1.3.0.json b/tests/test_data/test_config/test_config_v1.3.0.json index c6f7eb428..ae53e9506 100644 --- a/tests/test_data/test_config/test_config_v1.3.0.json +++ b/tests/test_data/test_config/test_config_v1.3.0.json @@ -20,7 +20,7 @@ "ApprovalMaximum": 5000000, "IterationFrequency": "X", "IterationType": "M", - "DefaultIterationTime": "07:00:00", + "IterationTime": "07:00:00", "Iterations": [ { "ID": "id_100", diff --git a/tests/unit/validation/test_app.py b/tests/unit/validation/test_app.py index 0c88bea80..fc5b83a8a 100644 --- 
a/tests/unit/validation/test_app.py +++ b/tests/unit/validation/test_app.py @@ -1,6 +1,7 @@ import sys from io import StringIO from unittest.mock import Mock, PropertyMock +from datetime import UTC, datetime, timedelta from pydantic import BaseModel, ValidationError @@ -84,6 +85,8 @@ def test_no_current_iteration(): # iterations must be a list, not a Mock result.campaign_config.iterations = [] + result.campaign_config.end_date = datetime.now(UTC).date() + timedelta(days=1) + # current_iteration should raise StopIteration type(result.campaign_config).current_iteration = PropertyMock(side_effect=StopIteration) diff --git a/tests/unit/validation/test_campaign_config_validator.py b/tests/unit/validation/test_campaign_config_validator.py index 49903011f..2d89cc683 100644 --- a/tests/unit/validation/test_campaign_config_validator.py +++ b/tests/unit/validation/test_campaign_config_validator.py @@ -167,13 +167,13 @@ def test_reviewer_field(self, reviewer, valid_campaign_config_with_only_mandator model = CampaignConfigValidation(**data) assert model.reviewer == reviewer - @pytest.mark.parametrize("default_iteration_time", ["14:00:00", "09:30:00", "18:45:00"]) - def test_default_iteration_time_field( - self, default_iteration_time, valid_campaign_config_with_only_mandatory_fields + @pytest.mark.parametrize("iteration_time", ["14:00:00", "09:30:00", "18:45:00"]) + def test_iteration_time_field( + self, iteration_time, valid_campaign_config_with_only_mandatory_fields ): - data = {**valid_campaign_config_with_only_mandatory_fields, "DefaultIterationTime": default_iteration_time} + data = {**valid_campaign_config_with_only_mandatory_fields, "IterationTime": iteration_time} model = CampaignConfigValidation(**data) - assert model.default_iteration_time == datetime.strptime(default_iteration_time, "%H:%M:%S").time() # noqa: DTZ007 + assert model.iteration_time == datetime.strptime(iteration_time, "%H:%M:%S").time() # noqa: DTZ007 @pytest.mark.parametrize("routing", ["email", 
"sms", "push"]) def test_default_comms_routing_field(self, routing, valid_campaign_config_with_only_mandatory_fields): diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py index 737ed219e..06ef6e49f 100644 --- a/tests/unit/validation/test_iteration_validator.py +++ b/tests/unit/validation/test_iteration_validator.py @@ -520,11 +520,11 @@ def test_invalid_iteration_collects_errors_if_iteration_rules_have_invalid_data( [ # Case 1: Iteration time overrides default ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0)), # noqa: DTZ001 - # Case 2: Iteration time is missing, so it uses default_iteration_time + # Case 2: Iteration time is missing, so it uses campaign config iteration_time (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0)), # noqa: DTZ001 # Case 3: Both are the same ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0)), # noqa: DTZ001 - # Case 4: Both are None, falls back to default value (12 AM) in default_iteration_time + # Case 4: Both are None, falls back to default value (12 AM) in campaign config iteration_time (None, None, datetime(2025, 1, 2, 0, 0, 0)), # noqa: DTZ001 ], ) @@ -544,7 +544,7 @@ def test_iteration_full_datetime_validation( data = valid_campaign_config_with_only_mandatory_fields.copy() if default_time_iteration_input: - data["default_iteration_time"] = default_time_iteration_input + data["iteration_time"] = default_time_iteration_input data["Iterations"] = [iteration_data] From cc9873ac012be8e53f7709d62826f4bf30fa1496 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Sun, 8 Mar 2026 13:45:13 +0000 Subject: [PATCH 37/66] ELI-674 - fix errors --- src/rules_validation_api/app.py | 2 -- tests/unit/validation/test_app.py | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index e1af12870..533b1050b 100644 --- a/src/rules_validation_api/app.py 
+++ b/src/rules_validation_api/app.py @@ -87,8 +87,6 @@ def display_current_iteration(result: RulesValidation) -> None: # ---- Current Iteration ---- if is_campaign_live: sys.stdout.write(f"{YELLOW}Campaign is {RESET}{GREEN}LIVE{RESET}\n") - current = config.current_iteration - try: current = config.current_iteration if current: diff --git a/tests/unit/validation/test_app.py b/tests/unit/validation/test_app.py index fc5b83a8a..5af9c32d7 100644 --- a/tests/unit/validation/test_app.py +++ b/tests/unit/validation/test_app.py @@ -112,6 +112,7 @@ def test_current_iteration_exists(): result.campaign_config = Mock() result.campaign_config.iterations = [mock_iteration] + result.campaign_config.end_date = datetime.now(UTC).date() + timedelta(days=1) type(result.campaign_config).current_iteration = PropertyMock(return_value=mock_iteration) From fcc1fec923cdc74194c06cce94cc3c77c1cb222e Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Sun, 8 Mar 2026 14:21:32 +0000 Subject: [PATCH 38/66] ELI-674 - new test case for validators --- src/rules_validation_api/app.py | 12 +++---- tests/unit/validation/test_app.py | 34 +++++++++++++++++-- .../test_campaign_config_validator.py | 4 +-- 3 files changed, 38 insertions(+), 12 deletions(-) diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index 533b1050b..62ebc4c95 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -112,24 +112,22 @@ def display_current_iteration(result: RulesValidation) -> None: sorted_iterations = sorted(iterations, key=attrgetter("iteration_date")) try: - next_iteration = next( - (i for i in sorted_iterations if i.iteration_date > today), None - ) + next_iteration = next((i for i in sorted_iterations if i.iteration_date > today), None) if next_iteration: sys.stdout.write( f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" ) sys.stdout.write( - 
f"{YELLOW}Next active Iteration's date&time: {RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" + f"{YELLOW}Next active Iteration's date&time: " + f"{RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" ) except StopIteration: sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") # ---- Total Iterations ---- - sys.stdout.write( - f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n" - ) + sys.stdout.write(f"{YELLOW}Total iterations configured: {RESET}{GREEN}{no_of_iterations}{RESET}\n") + if __name__ == "__main__": # pragma: no cover main() diff --git a/tests/unit/validation/test_app.py b/tests/unit/validation/test_app.py index 5af9c32d7..fe055d011 100644 --- a/tests/unit/validation/test_app.py +++ b/tests/unit/validation/test_app.py @@ -1,7 +1,7 @@ import sys +from datetime import UTC, datetime, timedelta from io import StringIO from unittest.mock import Mock, PropertyMock -from datetime import UTC, datetime, timedelta from pydantic import BaseModel, ValidationError @@ -107,6 +107,7 @@ def test_current_iteration_exists(): # Arrange mock_iteration = Mock() mock_iteration.iteration_number = 7 + mock_iteration.iteration_date = datetime.now(UTC).date() - timedelta(days=1) result = Mock() result.campaign_config = Mock() @@ -123,5 +124,34 @@ def test_current_iteration_exists(): sys.stdout = sys.__stdout__ - assert "Current Iteration Number:" in captured.getvalue() + assert "Current active Iteration Number:" in captured.getvalue() assert "7" in captured.getvalue() + + +def test_next_iteration_exists(): + # Given + today = datetime.now(UTC).date() + + # Setup + next_mock = Mock() + next_mock.iteration_number = 8 + next_mock.iteration_date = today + timedelta(days=5) + next_mock.iteration_datetime = today + timedelta(days=5) + + result = Mock() + result.campaign_config.end_date = today + timedelta(days=10) + result.campaign_config.iterations = [next_mock] + result.campaign_config.campaign_live = False 
# To focus only on Next Iteration output + + captured = StringIO() + sys.stdout = captured + + # When + display_current_iteration(result) + sys.stdout = sys.__stdout__ + output = captured.getvalue() + + # Then + assert "Next active Iteration Number:" in output + assert "8" in output + assert str(today + timedelta(days=5)) in output diff --git a/tests/unit/validation/test_campaign_config_validator.py b/tests/unit/validation/test_campaign_config_validator.py index 2d89cc683..4edf9ba8e 100644 --- a/tests/unit/validation/test_campaign_config_validator.py +++ b/tests/unit/validation/test_campaign_config_validator.py @@ -168,9 +168,7 @@ def test_reviewer_field(self, reviewer, valid_campaign_config_with_only_mandator assert model.reviewer == reviewer @pytest.mark.parametrize("iteration_time", ["14:00:00", "09:30:00", "18:45:00"]) - def test_iteration_time_field( - self, iteration_time, valid_campaign_config_with_only_mandatory_fields - ): + def test_iteration_time_field(self, iteration_time, valid_campaign_config_with_only_mandatory_fields): data = {**valid_campaign_config_with_only_mandatory_fields, "IterationTime": iteration_time} model = CampaignConfigValidation(**data) assert model.iteration_time == datetime.strptime(iteration_time, "%H:%M:%S").time() # noqa: DTZ007 From de258e9a21e27820e76e3a3f9dc30790f0c15f08 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 11:08:01 +0000 Subject: [PATCH 39/66] ELI-674 - linting --- src/eligibility_signposting_api/model/campaign_config.py | 2 +- src/rules_validation_api/app.py | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 3bad4892b..40b6e505a 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -349,7 +349,7 @@ def 
iteration_datetime(self) -> datetime: msg = f"No iteration_time and no parent linked for iteration {self.id}" raise ValueError(msg) - return datetime.combine(self.iteration_date, iteration_time) + return datetime.combine(self.iteration_date, iteration_time).replace(tzinfo=UTC) def __str__(self) -> str: return json.dumps(self.model_dump(by_alias=True), indent=2) diff --git a/src/rules_validation_api/app.py b/src/rules_validation_api/app.py index 62ebc4c95..0d990ff3c 100644 --- a/src/rules_validation_api/app.py +++ b/src/rules_validation_api/app.py @@ -93,8 +93,10 @@ def display_current_iteration(result: RulesValidation) -> None: sys.stdout.write( f"{YELLOW}Current active Iteration Number: {RESET}{GREEN}{current.iteration_number}{RESET}\n" ) + tz = current.iteration_datetime.tzinfo sys.stdout.write( - f"{YELLOW}Current active Iteration's date&time: {RESET}{GREEN}{current.iteration_datetime}{RESET}\n" + f"{YELLOW}Current active Iteration's date&time: " + f"{RESET}{GREEN}{current.iteration_datetime} ({tz}){RESET}\n" ) except StopIteration: sys.stdout.write(f"{YELLOW}No active iteration could be determined{RESET}\n") @@ -118,9 +120,10 @@ def display_current_iteration(result: RulesValidation) -> None: sys.stdout.write( f"{YELLOW}Next active Iteration Number: {RESET}{GREEN}{next_iteration.iteration_number}{RESET}\n" ) + tz = next_iteration.iteration_datetime.tzinfo sys.stdout.write( f"{YELLOW}Next active Iteration's date&time: " - f"{RESET}{GREEN}{next_iteration.iteration_datetime}{RESET}\n" + f"{RESET}{GREEN}{next_iteration.iteration_datetime} ({tz}){RESET}\n" ) except StopIteration: sys.stdout.write(f"{YELLOW}No next active iteration could be determined{RESET}\n") From 47f65ab8aa2b4522168decee4105da7d56efc581 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 11:16:57 +0000 Subject: [PATCH 40/66] ELI-674 - fixed unit tests --- tests/unit/validation/test_app.py | 6 +++++- 
tests/unit/validation/test_iteration_validator.py | 8 ++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/unit/validation/test_app.py b/tests/unit/validation/test_app.py index fe055d011..1fb9671a9 100644 --- a/tests/unit/validation/test_app.py +++ b/tests/unit/validation/test_app.py @@ -136,7 +136,11 @@ def test_next_iteration_exists(): next_mock = Mock() next_mock.iteration_number = 8 next_mock.iteration_date = today + timedelta(days=5) - next_mock.iteration_datetime = today + timedelta(days=5) + next_mock.iteration_datetime = datetime.combine( + next_mock.iteration_date, + datetime.min.time(), + tzinfo=UTC, + ) result = Mock() result.campaign_config.end_date = today + timedelta(days=10) diff --git a/tests/unit/validation/test_iteration_validator.py b/tests/unit/validation/test_iteration_validator.py index 06ef6e49f..0c65e50cf 100644 --- a/tests/unit/validation/test_iteration_validator.py +++ b/tests/unit/validation/test_iteration_validator.py @@ -519,13 +519,13 @@ def test_invalid_iteration_collects_errors_if_iteration_rules_have_invalid_data( ("iteration_time_input", "default_time_iteration_input", "expected_date_time"), [ # Case 1: Iteration time overrides default - ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0)), # noqa: DTZ001 + ("14:30:00", "09:00:00", datetime(2025, 1, 2, 14, 30, 0, tzinfo=UTC)), # Case 2: Iteration time is missing, so it uses campaign config iteration_time - (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0)), # noqa: DTZ001 + (None, "09:00:00", datetime(2025, 1, 2, 9, 0, 0, tzinfo=UTC)), # Case 3: Both are the same - ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0)), # noqa: DTZ001 + ("10:00:00", "10:00:00", datetime(2025, 1, 2, 10, 0, 0, tzinfo=UTC)), # Case 4: Both are None, falls back to default value (12 AM) in campaign config iteration_time - (None, None, datetime(2025, 1, 2, 0, 0, 0)), # noqa: DTZ001 + (None, None, datetime(2025, 1, 2, 0, 0, 0, tzinfo=UTC)), ], ) def 
test_iteration_full_datetime_validation( From 65bc1e5450bf29c1ae253a41d86fbfcba86037a3 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 11:20:36 +0000 Subject: [PATCH 41/66] ELI-674 - fixed integration tests --- tests/integration/in_process/test_eligibility_endpoint.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 2afca8543..a7ce2aea3 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1535,7 +1535,7 @@ def test_if_multiple_active_iterations_with_same_iteration_datetime_for_the_same err_msg = ( "Ambiguous result: '2' active iterations " "for target RSV " - f"found for datetime '{previous_day} 00:00:00' " + f"found for datetime '{previous_day} 00:00:00+00:00' " "across campaign(s) ['RSV_campaign_id_1', 'RSV_campaign_id_2']" ) assert any(err_msg in message for message in caplog.messages), ( From 584b4c0fa1969f2ac408e4814ce255117bb6d9e4 Mon Sep 17 00:00:00 2001 From: eddalmond1 <102675624+eddalmond1@users.noreply.github.com> Date: Wed, 4 Mar 2026 10:43:42 +0000 Subject: [PATCH 42/66] Create *.instructions.md --- .github/instructions/*.instructions.md | 409 +++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100644 .github/instructions/*.instructions.md diff --git a/.github/instructions/*.instructions.md b/.github/instructions/*.instructions.md new file mode 100644 index 000000000..303eabcbb --- /dev/null +++ b/.github/instructions/*.instructions.md @@ -0,0 +1,409 @@ +--- +description: 'Generic code review instructions that can be customized for any project using GitHub Copilot' +applyTo: '**' +excludeAgent: ["coding-agent"] + +--- + +# Generic Code Review Instructions + +Comprehensive code review guidelines for GitHub Copilot that can be adapted to any 
project. These instructions follow best practices from prompt engineering and provide a structured approach to code quality, security, testing, and architecture review. + +## Review Language + +When performing a code review, respond in **English** (or specify your preferred language). + +> **Customization Tip**: Change to your preferred language by replacing "English" with "Portuguese (Brazilian)", "Spanish", "French", etc. + +## Review Priorities + +When performing a code review, prioritize issues in the following order: + +### 🔴 CRITICAL (Block merge) + +- **Security**: Vulnerabilities, exposed secrets, authentication/authorization issues +- **Correctness**: Logic errors, data corruption risks, race conditions +- **Breaking Changes**: API contract changes without versioning +- **Data Loss**: Risk of data loss or corruption + +### 🟡 IMPORTANT (Requires discussion) + +- **Code Quality**: Severe violations of SOLID principles, excessive duplication +- **Test Coverage**: Missing tests for critical paths or new functionality +- **Performance**: Obvious performance bottlenecks (N+1 queries, memory leaks) +- **Architecture**: Significant deviations from established patterns + +### 🟢 SUGGESTION (Non-blocking improvements) + +- **Readability**: Poor naming, complex logic that could be simplified +- **Optimization**: Performance improvements without functional impact +- **Best Practices**: Minor deviations from conventions +- **Documentation**: Missing or incomplete comments/documentation + +## General Review Principles + +When performing a code review, follow these principles: + +1. **Be specific**: Reference exact lines, files, and provide concrete examples +2. **Provide context**: Explain WHY something is an issue and the potential impact +3. **Suggest solutions**: Show corrected code when applicable, not just what's wrong +4. **Be constructive**: Focus on improving the code, not criticizing the author +5. 
**Recognize good practices**: Acknowledge well-written code and smart solutions +6. **Be pragmatic**: Not every suggestion needs immediate implementation +7. **Group related comments**: Avoid multiple comments about the same topic + +## Code Quality Standards + +When performing a code review, check for: + +### Clean Code + +- Descriptive and meaningful names for variables, functions, and classes +- Single Responsibility Principle: each function/class does one thing well +- DRY (Don't Repeat Yourself): no code duplication +- Functions should be small and focused (ideally < 20-30 lines) +- Avoid deeply nested code (max 3-4 levels) +- Avoid magic numbers and strings (use constants) +- Code should be self-documenting; comments only when necessary + +### Examples + +```javascript +// ❌ BAD: Poor naming and magic numbers +function calc(x, y) { + if (x > 100) return y * 0.15; + return y * 0.10; +} + +// ✅ GOOD: Clear naming and constants +const PREMIUM_THRESHOLD = 100; +const PREMIUM_DISCOUNT_RATE = 0.15; +const STANDARD_DISCOUNT_RATE = 0.10; + +function calculateDiscount(orderTotal, itemPrice) { + const isPremiumOrder = orderTotal > PREMIUM_THRESHOLD; + const discountRate = isPremiumOrder ? 
PREMIUM_DISCOUNT_RATE : STANDARD_DISCOUNT_RATE; + return itemPrice * discountRate; +} +``` + +### Error Handling + +- Proper error handling at appropriate levels +- Meaningful error messages +- No silent failures or ignored exceptions +- Fail fast: validate inputs early +- Use appropriate error types/exceptions + +### Examples + +```python +# ❌ BAD: Silent failure and generic error +def process_user(user_id): + try: + user = db.get(user_id) + user.process() + except: + pass + +# ✅ GOOD: Explicit error handling +def process_user(user_id): + if not user_id or user_id <= 0: + raise ValueError(f"Invalid user_id: {user_id}") + + try: + user = db.get(user_id) + except UserNotFoundError: + raise UserNotFoundError(f"User {user_id} not found in database") + except DatabaseError as e: + raise ProcessingError(f"Failed to retrieve user {user_id}: {e}") + + return user.process() +``` + +## Security Review + +When performing a code review, check for security issues: + +- **Sensitive Data**: No passwords, API keys, tokens, or PII in code or logs +- **Input Validation**: All user inputs are validated and sanitized +- **SQL Injection**: Use parameterized queries, never string concatenation +- **Authentication**: Proper authentication checks before accessing resources +- **Authorization**: Verify user has permission to perform action +- **Cryptography**: Use established libraries, never roll your own crypto +- **Dependency Security**: Check for known vulnerabilities in dependencies + +### Examples + +```javascript +// ❌ BAD: Exposed secret in code +const API_KEY = "sk_live_abc123xyz789"; + +// ✅ GOOD: Use environment variables +const API_KEY = process.env.API_KEY; +``` + +## Testing Standards + +When performing a code review, verify test quality: + +- **Coverage**: Critical paths and new functionality must have tests +- **Test Names**: Descriptive names that explain what is being tested +- **Test Structure**: Clear Arrange-Act-Assert or Given-When-Then pattern +- **Independence**: 
Tests should not depend on each other or external state +- **Assertions**: Use specific assertions using HamCrest, avoid generic assertTrue/assertFalse +- **Edge Cases**: Test boundary conditions, null values, empty collections +- **Mock Appropriately**: Mock external dependencies, not domain logic + +### Examples + +```typescript +// ❌ BAD: Vague name and assertion +test('test1', () => { + const result = calc(5, 10); + expect(result).toBeTruthy(); +}); + +// ✅ GOOD: Descriptive name and specific assertion +test('should calculate 10% discount for orders under $100', () => { + const orderTotal = 50; + const itemPrice = 20; + + const discount = calculateDiscount(orderTotal, itemPrice); + + expect(discount).toBe(2.00); +}); +``` + +## Performance Considerations + +When performing a code review, check for performance issues: + +- **Database Queries**: Avoid N+1 queries, use proper indexing +- **Algorithms**: Appropriate time/space complexity for the use case +- **Caching**: Utilize caching for expensive or repeated operations +- **Resource Management**: Proper cleanup of connections, files, streams +- **Lazy Loading**: Load data only when needed + +### Examples + +```python +# ❌ BAD: N+1 query problem +users = User.query.all() +for user in users: + orders = Order.query.filter_by(user_id=user.id).all() # N+1! 
+ +# ✅ GOOD: Use JOIN or eager loading +users = User.query.options(joinedload(User.orders)).all() +for user in users: + orders = user.orders +``` + +## Architecture and Design + +When performing a code review, verify architectural principles: + +- **Separation of Concerns**: Clear boundaries between layers/modules +- **Dependency Direction**: High-level modules don't depend on low-level details +- **Interface Segregation**: Prefer small, focused interfaces +- **Loose Coupling**: Components should be independently testable +- **High Cohesion**: Related functionality grouped together +- **Consistent Patterns**: Follow established patterns in the codebase + +## Documentation Standards + +When performing a code review, check documentation: + +- **Doc String Documentation**: Code must be documented (purpose, parameters, returns) +- **Complex Logic**: Non-obvious logic should have explanatory comments +- **README Updates**: Update README when adding features or changing setup +- **Breaking Changes**: Document any breaking changes clearly +- **Examples**: Provide usage examples for complex features + +## Comment Format Template + +When performing a code review, use this format for comments: + +```markdown +**[PRIORITY] Category: Brief title** + +Detailed description of the issue or suggestion. + +**Why this matters:** +Explanation of the impact or reason for the suggestion. + +**Suggested fix:** +[code example if applicable] + +**Reference:** [link to relevant documentation or standard] +``` + +### Example Comments + +#### Critical Issue + +````markdown +**🔴 CRITICAL - Security: SQL Injection Vulnerability** + +The query on line 45 concatenates user input directly into the SQL string, +creating a SQL injection vulnerability. + +**Why this matters:** +An attacker could manipulate the email parameter to execute arbitrary SQL commands, +potentially exposing or deleting all database data. 
+ +**Suggested fix:** +```sql +-- Instead of: +query = "SELECT * FROM users WHERE email = '" + email + "'" + +-- Use: +PreparedStatement stmt = conn.prepareStatement( + "SELECT * FROM users WHERE email = ?" +); +stmt.setString(1, email); +``` + +**Reference:** OWASP SQL Injection Prevention Cheat Sheet +```` + +#### Important Issue + +````markdown +**🟡 IMPORTANT - Testing: Missing test coverage for critical path** + +The `processPayment()` function handles financial transactions but has no tests +for the refund scenario. + +**Why this matters:** +Refunds involve money movement and should be thoroughly tested to prevent +financial errors or data inconsistencies. + +**Suggested fix:** +Add test case: +```javascript +test('should process full refund when order is cancelled', () => { + const order = createOrder({ total: 100, status: 'cancelled' }); + + const result = processPayment(order, { type: 'refund' }); + + expect(result.refundAmount).toBe(100); + expect(result.status).toBe('refunded'); +}); +``` +```` + +#### Suggestion + +````markdown +**🟢 SUGGESTION - Readability: Simplify nested conditionals** + +The nested if statements on lines 30-40 make the logic hard to follow. + +**Why this matters:** +Simpler code is easier to maintain, debug, and test. 
+ +**Suggested fix:** +```javascript +// Instead of nested ifs: +if (user) { + if (user.isActive) { + if (user.hasPermission('write')) { + // do something + } + } +} + +// Consider guard clauses: +if (!user || !user.isActive || !user.hasPermission('write')) { + return; +} +// do something +``` +```` + +## Review Checklist + +When performing a code review, systematically verify: + +### Code Quality + +- [ ] Code follows consistent style and conventions +- [ ] Names are descriptive and follow naming conventions +- [ ] Functions/methods are small and focused +- [ ] No code duplication +- [ ] Complex logic is broken into simpler parts +- [ ] Error handling is appropriate +- [ ] No commented-out code or TODO without tickets + +### Security + +- [ ] No sensitive data in code or logs + +### Testing + +- [ ] New code has appropriate test coverage +- [ ] Tests are well-named and focused +- [ ] Tests cover edge cases and error scenarios +- [ ] Tests are independent and deterministic +- [ ] No tests that always pass or are commented out + +### Performance + +- [ ] No obvious performance issues (N+1, memory leaks) +- [ ] Appropriate use of caching +- [ ] Efficient algorithms and data structures +- [ ] Proper resource cleanup + +### Architecture + +- [ ] Follows established patterns and conventions +- [ ] Proper separation of concerns +- [ ] No architectural violations +- [ ] Dependencies flow in correct direction + +### Documentation + +- [ ] Complex logic has explanatory comments +- [ ] README is updated if needed +- [ ] Breaking changes are documented + +## Project-Specific Customizations + +To customize this template for your project, add sections for: + +1. **Language/Framework specific checks** + - Check that wireup has been used for dependency injection, where appropriate +2. 
**Build and deployment** + - When performing a code review, verify CI/CD pipeline configuration is correct + - When performing a code review, check terraform changes are safe and syntactically correct + +## Additional Resources + +For more information on effective code reviews and GitHub Copilot customization: + +- [GitHub Copilot Prompt Engineering](https://docs.github.com/en/copilot/concepts/prompting/prompt-engineering) +- [GitHub Copilot Custom Instructions](https://code.visualstudio.com/docs/copilot/customization/custom-instructions) +- [Awesome GitHub Copilot Repository](https://github.com/github/awesome-copilot) +- [GitHub Code Review Guidelines](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests) +- [Google Engineering Practices - Code Review](https://google.github.io/eng-practices/review/) +- [OWASP Security Guidelines](https://owasp.org/) + +## Prompt Engineering Tips + +When performing a code review, apply these prompt engineering principles from the [GitHub Copilot documentation](https://docs.github.com/en/copilot/concepts/prompting/prompt-engineering): + +1. **Start General, Then Get Specific**: Begin with high-level architecture review, then drill into implementation details +2. **Give Examples**: Reference similar patterns in the codebase when suggesting changes +3. **Break Complex Tasks**: Review large PRs in logical chunks (security → tests → logic → style) +4. **Avoid Ambiguity**: Be specific about which file, line, and issue you're addressing +5. **Indicate Relevant Code**: Reference related code that might be affected by changes +6. **Experiment and Iterate**: If initial review misses something, review again with focused questions + +## Project Context + +This is a generic template. 
Customize this section with your project-specific information: + +- **Tech Stack**: Python, Terraform +- **Build Tool**: Poetry, Docker (for Lambda), Terraform +- **Testing**: pytest From 9e6ce50ac5994c00bbb5a9f8cf3f0524c6dd450d Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:13:25 +0000 Subject: [PATCH 43/66] eja - refining instruction files using Github best practice prompt --- .github/instructions/*.instructions.md | 409 ------------------ .../instructions/code-review.instructions.md | 65 +++ .github/instructions/python.instructions.md | 93 ++++ 3 files changed, 158 insertions(+), 409 deletions(-) delete mode 100644 .github/instructions/*.instructions.md create mode 100644 .github/instructions/code-review.instructions.md create mode 100644 .github/instructions/python.instructions.md diff --git a/.github/instructions/*.instructions.md b/.github/instructions/*.instructions.md deleted file mode 100644 index 303eabcbb..000000000 --- a/.github/instructions/*.instructions.md +++ /dev/null @@ -1,409 +0,0 @@ ---- -description: 'Generic code review instructions that can be customized for any project using GitHub Copilot' -applyTo: '**' -excludeAgent: ["coding-agent"] - ---- - -# Generic Code Review Instructions - -Comprehensive code review guidelines for GitHub Copilot that can be adapted to any project. These instructions follow best practices from prompt engineering and provide a structured approach to code quality, security, testing, and architecture review. - -## Review Language - -When performing a code review, respond in **English** (or specify your preferred language). - -> **Customization Tip**: Change to your preferred language by replacing "English" with "Portuguese (Brazilian)", "Spanish", "French", etc. 
- -## Review Priorities - -When performing a code review, prioritize issues in the following order: - -### 🔴 CRITICAL (Block merge) - -- **Security**: Vulnerabilities, exposed secrets, authentication/authorization issues -- **Correctness**: Logic errors, data corruption risks, race conditions -- **Breaking Changes**: API contract changes without versioning -- **Data Loss**: Risk of data loss or corruption - -### 🟡 IMPORTANT (Requires discussion) - -- **Code Quality**: Severe violations of SOLID principles, excessive duplication -- **Test Coverage**: Missing tests for critical paths or new functionality -- **Performance**: Obvious performance bottlenecks (N+1 queries, memory leaks) -- **Architecture**: Significant deviations from established patterns - -### 🟢 SUGGESTION (Non-blocking improvements) - -- **Readability**: Poor naming, complex logic that could be simplified -- **Optimization**: Performance improvements without functional impact -- **Best Practices**: Minor deviations from conventions -- **Documentation**: Missing or incomplete comments/documentation - -## General Review Principles - -When performing a code review, follow these principles: - -1. **Be specific**: Reference exact lines, files, and provide concrete examples -2. **Provide context**: Explain WHY something is an issue and the potential impact -3. **Suggest solutions**: Show corrected code when applicable, not just what's wrong -4. **Be constructive**: Focus on improving the code, not criticizing the author -5. **Recognize good practices**: Acknowledge well-written code and smart solutions -6. **Be pragmatic**: Not every suggestion needs immediate implementation -7. 
**Group related comments**: Avoid multiple comments about the same topic - -## Code Quality Standards - -When performing a code review, check for: - -### Clean Code - -- Descriptive and meaningful names for variables, functions, and classes -- Single Responsibility Principle: each function/class does one thing well -- DRY (Don't Repeat Yourself): no code duplication -- Functions should be small and focused (ideally < 20-30 lines) -- Avoid deeply nested code (max 3-4 levels) -- Avoid magic numbers and strings (use constants) -- Code should be self-documenting; comments only when necessary - -### Examples - -```javascript -// ❌ BAD: Poor naming and magic numbers -function calc(x, y) { - if (x > 100) return y * 0.15; - return y * 0.10; -} - -// ✅ GOOD: Clear naming and constants -const PREMIUM_THRESHOLD = 100; -const PREMIUM_DISCOUNT_RATE = 0.15; -const STANDARD_DISCOUNT_RATE = 0.10; - -function calculateDiscount(orderTotal, itemPrice) { - const isPremiumOrder = orderTotal > PREMIUM_THRESHOLD; - const discountRate = isPremiumOrder ? 
PREMIUM_DISCOUNT_RATE : STANDARD_DISCOUNT_RATE; - return itemPrice * discountRate; -} -``` - -### Error Handling - -- Proper error handling at appropriate levels -- Meaningful error messages -- No silent failures or ignored exceptions -- Fail fast: validate inputs early -- Use appropriate error types/exceptions - -### Examples - -```python -# ❌ BAD: Silent failure and generic error -def process_user(user_id): - try: - user = db.get(user_id) - user.process() - except: - pass - -# ✅ GOOD: Explicit error handling -def process_user(user_id): - if not user_id or user_id <= 0: - raise ValueError(f"Invalid user_id: {user_id}") - - try: - user = db.get(user_id) - except UserNotFoundError: - raise UserNotFoundError(f"User {user_id} not found in database") - except DatabaseError as e: - raise ProcessingError(f"Failed to retrieve user {user_id}: {e}") - - return user.process() -``` - -## Security Review - -When performing a code review, check for security issues: - -- **Sensitive Data**: No passwords, API keys, tokens, or PII in code or logs -- **Input Validation**: All user inputs are validated and sanitized -- **SQL Injection**: Use parameterized queries, never string concatenation -- **Authentication**: Proper authentication checks before accessing resources -- **Authorization**: Verify user has permission to perform action -- **Cryptography**: Use established libraries, never roll your own crypto -- **Dependency Security**: Check for known vulnerabilities in dependencies - -### Examples - -```javascript -// ❌ BAD: Exposed secret in code -const API_KEY = "sk_live_abc123xyz789"; - -// ✅ GOOD: Use environment variables -const API_KEY = process.env.API_KEY; -``` - -## Testing Standards - -When performing a code review, verify test quality: - -- **Coverage**: Critical paths and new functionality must have tests -- **Test Names**: Descriptive names that explain what is being tested -- **Test Structure**: Clear Arrange-Act-Assert or Given-When-Then pattern -- **Independence**: 
Tests should not depend on each other or external state -- **Assertions**: Use specific assertions using HamCrest, avoid generic assertTrue/assertFalse -- **Edge Cases**: Test boundary conditions, null values, empty collections -- **Mock Appropriately**: Mock external dependencies, not domain logic - -### Examples - -```typescript -// ❌ BAD: Vague name and assertion -test('test1', () => { - const result = calc(5, 10); - expect(result).toBeTruthy(); -}); - -// ✅ GOOD: Descriptive name and specific assertion -test('should calculate 10% discount for orders under $100', () => { - const orderTotal = 50; - const itemPrice = 20; - - const discount = calculateDiscount(orderTotal, itemPrice); - - expect(discount).toBe(2.00); -}); -``` - -## Performance Considerations - -When performing a code review, check for performance issues: - -- **Database Queries**: Avoid N+1 queries, use proper indexing -- **Algorithms**: Appropriate time/space complexity for the use case -- **Caching**: Utilize caching for expensive or repeated operations -- **Resource Management**: Proper cleanup of connections, files, streams -- **Lazy Loading**: Load data only when needed - -### Examples - -```python -# ❌ BAD: N+1 query problem -users = User.query.all() -for user in users: - orders = Order.query.filter_by(user_id=user.id).all() # N+1! 
- -# ✅ GOOD: Use JOIN or eager loading -users = User.query.options(joinedload(User.orders)).all() -for user in users: - orders = user.orders -``` - -## Architecture and Design - -When performing a code review, verify architectural principles: - -- **Separation of Concerns**: Clear boundaries between layers/modules -- **Dependency Direction**: High-level modules don't depend on low-level details -- **Interface Segregation**: Prefer small, focused interfaces -- **Loose Coupling**: Components should be independently testable -- **High Cohesion**: Related functionality grouped together -- **Consistent Patterns**: Follow established patterns in the codebase - -## Documentation Standards - -When performing a code review, check documentation: - -- **Doc String Documentation**: Code must be documented (purpose, parameters, returns) -- **Complex Logic**: Non-obvious logic should have explanatory comments -- **README Updates**: Update README when adding features or changing setup -- **Breaking Changes**: Document any breaking changes clearly -- **Examples**: Provide usage examples for complex features - -## Comment Format Template - -When performing a code review, use this format for comments: - -```markdown -**[PRIORITY] Category: Brief title** - -Detailed description of the issue or suggestion. - -**Why this matters:** -Explanation of the impact or reason for the suggestion. - -**Suggested fix:** -[code example if applicable] - -**Reference:** [link to relevant documentation or standard] -``` - -### Example Comments - -#### Critical Issue - -````markdown -**🔴 CRITICAL - Security: SQL Injection Vulnerability** - -The query on line 45 concatenates user input directly into the SQL string, -creating a SQL injection vulnerability. - -**Why this matters:** -An attacker could manipulate the email parameter to execute arbitrary SQL commands, -potentially exposing or deleting all database data. 
- -**Suggested fix:** -```sql --- Instead of: -query = "SELECT * FROM users WHERE email = '" + email + "'" - --- Use: -PreparedStatement stmt = conn.prepareStatement( - "SELECT * FROM users WHERE email = ?" -); -stmt.setString(1, email); -``` - -**Reference:** OWASP SQL Injection Prevention Cheat Sheet -```` - -#### Important Issue - -````markdown -**🟡 IMPORTANT - Testing: Missing test coverage for critical path** - -The `processPayment()` function handles financial transactions but has no tests -for the refund scenario. - -**Why this matters:** -Refunds involve money movement and should be thoroughly tested to prevent -financial errors or data inconsistencies. - -**Suggested fix:** -Add test case: -```javascript -test('should process full refund when order is cancelled', () => { - const order = createOrder({ total: 100, status: 'cancelled' }); - - const result = processPayment(order, { type: 'refund' }); - - expect(result.refundAmount).toBe(100); - expect(result.status).toBe('refunded'); -}); -``` -```` - -#### Suggestion - -````markdown -**🟢 SUGGESTION - Readability: Simplify nested conditionals** - -The nested if statements on lines 30-40 make the logic hard to follow. - -**Why this matters:** -Simpler code is easier to maintain, debug, and test. 
- -**Suggested fix:** -```javascript -// Instead of nested ifs: -if (user) { - if (user.isActive) { - if (user.hasPermission('write')) { - // do something - } - } -} - -// Consider guard clauses: -if (!user || !user.isActive || !user.hasPermission('write')) { - return; -} -// do something -``` -```` - -## Review Checklist - -When performing a code review, systematically verify: - -### Code Quality - -- [ ] Code follows consistent style and conventions -- [ ] Names are descriptive and follow naming conventions -- [ ] Functions/methods are small and focused -- [ ] No code duplication -- [ ] Complex logic is broken into simpler parts -- [ ] Error handling is appropriate -- [ ] No commented-out code or TODO without tickets - -### Security - -- [ ] No sensitive data in code or logs - -### Testing - -- [ ] New code has appropriate test coverage -- [ ] Tests are well-named and focused -- [ ] Tests cover edge cases and error scenarios -- [ ] Tests are independent and deterministic -- [ ] No tests that always pass or are commented out - -### Performance - -- [ ] No obvious performance issues (N+1, memory leaks) -- [ ] Appropriate use of caching -- [ ] Efficient algorithms and data structures -- [ ] Proper resource cleanup - -### Architecture - -- [ ] Follows established patterns and conventions -- [ ] Proper separation of concerns -- [ ] No architectural violations -- [ ] Dependencies flow in correct direction - -### Documentation - -- [ ] Complex logic has explanatory comments -- [ ] README is updated if needed -- [ ] Breaking changes are documented - -## Project-Specific Customizations - -To customize this template for your project, add sections for: - -1. **Language/Framework specific checks** - - Check that wireup has been used for dependency injection, where appropriate -2. 
**Build and deployment** - - When performing a code review, verify CI/CD pipeline configuration is correct - - When performing a code review, check terraform changes are safe and syntactically correct - -## Additional Resources - -For more information on effective code reviews and GitHub Copilot customization: - -- [GitHub Copilot Prompt Engineering](https://docs.github.com/en/copilot/concepts/prompting/prompt-engineering) -- [GitHub Copilot Custom Instructions](https://code.visualstudio.com/docs/copilot/customization/custom-instructions) -- [Awesome GitHub Copilot Repository](https://github.com/github/awesome-copilot) -- [GitHub Code Review Guidelines](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests) -- [Google Engineering Practices - Code Review](https://google.github.io/eng-practices/review/) -- [OWASP Security Guidelines](https://owasp.org/) - -## Prompt Engineering Tips - -When performing a code review, apply these prompt engineering principles from the [GitHub Copilot documentation](https://docs.github.com/en/copilot/concepts/prompting/prompt-engineering): - -1. **Start General, Then Get Specific**: Begin with high-level architecture review, then drill into implementation details -2. **Give Examples**: Reference similar patterns in the codebase when suggesting changes -3. **Break Complex Tasks**: Review large PRs in logical chunks (security → tests → logic → style) -4. **Avoid Ambiguity**: Be specific about which file, line, and issue you're addressing -5. **Indicate Relevant Code**: Reference related code that might be affected by changes -6. **Experiment and Iterate**: If initial review misses something, review again with focused questions - -## Project Context - -This is a generic template. 
Customize this section with your project-specific information: - -- **Tech Stack**: Python, Terraform -- **Build Tool**: Poetry, Docker (for Lambda), Terraform -- **Testing**: pytest diff --git a/.github/instructions/code-review.instructions.md b/.github/instructions/code-review.instructions.md new file mode 100644 index 000000000..99c49ab05 --- /dev/null +++ b/.github/instructions/code-review.instructions.md @@ -0,0 +1,65 @@ +--- +description: "Code review instructions for the eligibility-signposting-api project" +applyTo: "**" +excludeAgent: ["coding-agent"] +--- + +# Code Review Instructions + +Guidelines for the eligibility-signposting-api project — a serverless AWS Lambda + Flask eligibility rules engine. + +## Review Priorities + +### Critical (block merge) + +- **Security**: Exposed secrets, PII leakage (especially NHS Numbers), missing header validation +- **Correctness**: Logic errors in rules engine evaluation, incorrect operator behaviour, data corruption in DynamoDB +- **Breaking Changes**: API contract changes to FHIR response models or request validation + +### Important (requires discussion) + +- **Code Quality**: SOLID violations, excessive duplication +- **Test Coverage**: Missing tests for critical paths, new rules/operators, or edge cases +- **Performance**: Unnecessary DynamoDB scans, missing caching, Lambda cold start regressions +- **Architecture**: Deviations from established patterns (wireup DI, chain of responsibility, operator registry) + +### Suggestion (non-blocking) + +- **Readability**: Naming, simplification of complex logic +- **Best Practices**: Minor convention deviations +- **Documentation**: Missing or incomplete docstrings + +## Security + +- **PII handling**: NHS Numbers must never appear in logs or error messages. `TokenError` messages must be redacted. Verify new log statements do not leak person data. +- **Secrets**: No API keys, tokens, or secrets in code. Use environment variables or AWS Secrets Manager. 
+- **NHS Number hashing**: Lookups use HMAC-SHA512 via `HashingService` with secret rotation (AWSCURRENT → AWSPREVIOUS fallback). +- **Header validation**: `NHSE-Product-ID` must be present (403 if missing). `nhs-login-nhs-number` must match path parameter. +- **Security headers**: Responses must include `Cache-Control: no-store, private`, `Strict-Transport-Security`, `X-Content-Type-Options: nosniff`. + +## Architecture + +- **Dependency injection**: Use wireup `@service` for all services, repos, and factories. Inject via `Injected[T]`, `Inject(qualifier=...)`, or `Inject(param=...)`. Never instantiate services manually. +- **Chain of responsibility**: Processing follows `CohortEligibilityHandler → BaseEligibilityHandler → FilterRuleHandler → SuppressionRuleHandler`. Extend this chain for new steps. +- **Operator registry**: New operators must extend `hamcrest.BaseMatcher` and register via the decorator-based `OperatorRegistry`. +- **Pydantic models**: Use `Field(alias=...)` for JSON mapping, `field_validator`/`model_validator` for validation. Response models use camelCase aliases. +- **FHIR compliance**: Error responses must use `OperationOutcome` models with `application/fhir+json` content type. +- **Lambda reuse**: The Flask app is cached in `CacheManager` across invocations. Changes to app initialization must not break container reuse. + +## Performance + +- **DynamoDB**: Use `query()` with `KeyConditionExpression`, never `scan()`. Partition key is `NHS_NUMBER`, sort key discriminator is `ATTRIBUTE_TYPE`. +- **S3 config loading**: Campaign configs load from S3 per request. Avoid unnecessary `list_objects` or `get_object` calls. +- **Caching**: Feature toggles use `TTLCache` (300s). New caching should follow the same pattern with appropriate TTLs. +- **Lambda cold starts**: Avoid heavy imports at module level. Keep wireup service graph lean. 
+ +## Audit Trail + +- **Completeness**: New eligibility logic must call `AuditContext.append_audit_condition()` to record evaluation details. +- **Firehose delivery**: Audit events use Pydantic `AuditEvent` models sent to Kinesis Firehose. Preserve the full audit data model. + +## Terraform + +- **Encryption**: All AWS resources (DynamoDB, S3, Lambda, Firehose, Secrets Manager) must use KMS CMK encryption. +- **Environment parity**: Verify deletion protection and PITR are enabled for prod/preprod DynamoDB tables. +- **Safety**: Terraform changes must not destroy or replace stateful resources (DynamoDB tables, S3 buckets) unintentionally. diff --git a/.github/instructions/python.instructions.md b/.github/instructions/python.instructions.md new file mode 100644 index 000000000..d5dd89b48 --- /dev/null +++ b/.github/instructions/python.instructions.md @@ -0,0 +1,93 @@ +--- +description: "Python coding standards for the eligibility-signposting-api project" +applyTo: "**/*.py" +--- + +# Python Coding Standards + +## Naming Conventions + +- `snake_case` for functions, variables, and module names. +- `PascalCase` for class names. +- `UPPER_SNAKE_CASE` for constants. +- Prefix private methods and attributes with `_`. + +## Code Style + +- Line length limit: 120 characters (enforced by ruff). +- Use type hints for all function signatures and return types. +- Prefer `dataclass` for simple domain objects, Pydantic `BaseModel` for validated/serialized models. +- Use `StrEnum` for string enumerations. +- Avoid bare `except:` — catch specific exceptions. + +## Error Handling + +```python +# Bad: silent failure +try: + person = repo.get(nhs_number) +except Exception: + pass + +# Good: specific exceptions with context +try: + person = repo.get(nhs_number) +except PersonNotFoundError: + raise +except ClientError as e: + raise RepositoryError(f"Failed to query person table: {e}") from e +``` + +## Dependency Injection (wireup) + +- Decorate services with `@service`. 
Do not instantiate services manually. +- Use `Inject(qualifier=...)` for AWS client disambiguation. +- Use `Inject(param=...)` for configuration values. +- Register factory functions with `@service` for boto3 clients. + +```python +# Good +@service +class MyService: + def __init__(self, repo: Injected[MyRepo]) -> None: + self._repo = repo + +# Bad: manual instantiation +class MyService: + def __init__(self) -> None: + self._repo = MyRepo() +``` + +## Pydantic Models + +- Use `Field(alias=...)` for JSON key mapping. +- Use `field_validator` / `model_validator` for custom validation. +- Response models must use camelCase aliases (`alias_generator=to_camel` or explicit aliases). +- Use `model_dump(by_alias=True)` when serializing for API responses. + +## Testing + +- Use `pytest` with pyHamcrest assertions (`assert_that`, `is_`, `has_entries`, `contains_exactly`, etc.). +- Use `brunns-matchers` for Werkzeug response assertions. +- Use project auto-matchers (`BaseAutoMatcher`) for dataclass/Pydantic model assertions. +- Use `polyfactory` (`DataclassFactory` / `ModelFactory`) for test data builders. +- Mock AWS services with `moto`, not manual stubs. +- Use `@pytest.mark.parametrize` for rule/operator test cases. + +```python +# Good: pyHamcrest with specific matchers +def test_eligible_person_returns_eligible_status(): + result = evaluate(person, campaign) + assert_that(result, is_(has_property("status", equal_to(Status.ELIGIBLE)))) + +# Bad: generic assert +def test_eligible(): + result = evaluate(person, campaign) + assert result is not None +``` + +## Logging + +- Use structured JSON logging via `python-json-logger`. +- Never log NHS Numbers or other PII. +- Include `request_id` via the `ContextVar` pattern for request tracing. 
From 5940f3eb7a853cfeb41927c3a00cf09ede5da4f2 Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Wed, 4 Mar 2026 12:57:10 +0000 Subject: [PATCH 44/66] eja - fixing secret scan and vale --- .github/instructions/code-review.instructions.md | 4 ++-- scripts/config/gitleaks.toml | 2 +- .../config/vale/styles/config/vocabularies/words/accept.txt | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/instructions/code-review.instructions.md b/.github/instructions/code-review.instructions.md index 99c49ab05..5bf9ebb4c 100644 --- a/.github/instructions/code-review.instructions.md +++ b/.github/instructions/code-review.instructions.md @@ -49,7 +49,7 @@ Guidelines for the eligibility-signposting-api project — a serverless AWS Lamb ## Performance - **DynamoDB**: Use `query()` with `KeyConditionExpression`, never `scan()`. Partition key is `NHS_NUMBER`, sort key discriminator is `ATTRIBUTE_TYPE`. -- **S3 config loading**: Campaign configs load from S3 per request. Avoid unnecessary `list_objects` or `get_object` calls. +- **S3 configuration loading**: Campaign configs load from S3 per request. Avoid unnecessary `list_objects` or `get_object` calls. - **Caching**: Feature toggles use `TTLCache` (300s). New caching should follow the same pattern with appropriate TTLs. - **Lambda cold starts**: Avoid heavy imports at module level. Keep wireup service graph lean. @@ -61,5 +61,5 @@ Guidelines for the eligibility-signposting-api project — a serverless AWS Lamb ## Terraform - **Encryption**: All AWS resources (DynamoDB, S3, Lambda, Firehose, Secrets Manager) must use KMS CMK encryption. -- **Environment parity**: Verify deletion protection and PITR are enabled for prod/preprod DynamoDB tables. +- **Environment parity**: Verify deletion protection and PITR are enabled for production/pre-production DynamoDB tables. 
- **Safety**: Terraform changes must not destroy or replace stateful resources (DynamoDB tables, S3 buckets) unintentionally. diff --git a/scripts/config/gitleaks.toml b/scripts/config/gitleaks.toml index 66a3d7e94..adf24874f 100644 --- a/scripts/config/gitleaks.toml +++ b/scripts/config/gitleaks.toml @@ -16,5 +16,5 @@ regexes = [ ] [allowlist] -paths = ['''.terraform.lock.hcl''', '''poetry.lock''', '''yarn.lock'''] +paths = ['''.terraform.lock.hcl''', '''poetry.lock''', '''yarn.lock''', '''.github/instructions/\*.instructions.md'''] stopwords = ['''dummy_key''', '''dummy_secret''', '''192.0.0.1''', '''prance = "^25.4.8.0"''', '''25.4.8.0'''] diff --git a/scripts/config/vale/styles/config/vocabularies/words/accept.txt b/scripts/config/vale/styles/config/vocabularies/words/accept.txt index fc7d8ef23..dc1443bea 100644 --- a/scripts/config/vale/styles/config/vocabularies/words/accept.txt +++ b/scripts/config/vale/styles/config/vocabularies/words/accept.txt @@ -27,6 +27,7 @@ Terraform toolchain Trufflehog Uncomment +Werkzeug Syncytial pyenv colima From c81afdd403c4823e9a1a6d17a36d901747bd282d Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Wed, 4 Mar 2026 15:02:32 +0000 Subject: [PATCH 45/66] added ignore for gitleaks as well as an allow for the file --- .gitleaksignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitleaksignore b/.gitleaksignore index ff9cec0ef..23899810e 100644 --- a/.gitleaksignore +++ b/.gitleaksignore @@ -1,5 +1,4 @@ # SEE: https://github.com/gitleaks/gitleaks/blob/master/README.md#gitleaksignore cd9c0efec38c5d63053dd865e5d4e207c0760d91:docs/guides/Perform_static_analysis.md:generic-api-key:37 - bf0c77098978c450d8570b38fb480fbb8d6a0628:.github/instructions/*.instructions.md:stripe-access-token:140 From 525c9e57614803f981db3efd9b2d006618fd0585 Mon Sep 17 00:00:00 2001 From: Karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 15:53:39 +0000 Subject: [PATCH 
46/66] Eli 615 : fix - multi campaign target collision (#593) * ELI-615 | campaign having recent - active start_date supersedes the others sharing same best-status * ELI-615 | more linting * ELI-615 | revert commit * ELI-615 | wip * ELI-615 | wip * ELI-615 | wip * Bump werkzeug from 3.1.5 to 3.1.6 Bumps [werkzeug](https://github.com/pallets/werkzeug) from 3.1.5 to 3.1.6. - [Release notes](https://github.com/pallets/werkzeug/releases) - [Changelog](https://github.com/pallets/werkzeug/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/werkzeug/compare/3.1.5...3.1.6) --- updated-dependencies: - dependency-name: werkzeug dependency-version: 3.1.6 dependency-type: indirect ... Signed-off-by: dependabot[bot] * Updated not_member_of operator to NotMemberOf (#594) * Added vulture to workflows (#585) * Added vulture to workflows * Added new make commands and added to project * Added updated lockfile * Minimal config with no errors * Corrected vulture commands * Generating new lock file * ELI-615 | modified iterations_result to iteration result * ELI-615 | fix - naming issues | handle stop iter exception * ELI-615 | campaign_configs - fixture updated | test case fixed * ELI-615 | fix flaky tests due to fixture scope * ELI-615 | fix flaky tests - removed best status test * ELI-615 | used raw campaign config for tests using iteration dates * ELI-615 | fix - campaign group is used correctly * ELI-615 | fix test_campaigns_grouped_by_condition_name_filters_correctly * ELI-615 | fix tests * ELI-615 | linting * ELI-615 | renamed best_iteration_result to iteration_result_summary * ELI-615 | add more test cases - it tests * ELI-615 | test commit - try git leaks ignore * ELI-615 | incorporated review comments --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: oneeb-nhs <258801025+oneeb-nhs@users.noreply.github.com> Co-authored-by: Robert Bailiff --- .gitleaksignore | 1 +
.../services/processors/campaign_evaluator.py | 10 ++++------ .../in_process/test_eligibility_endpoint.py | 9 +++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.gitleaksignore b/.gitleaksignore index 23899810e..ff9cec0ef 100644 --- a/.gitleaksignore +++ b/.gitleaksignore @@ -1,4 +1,5 @@ # SEE: https://github.com/gitleaks/gitleaks/blob/master/README.md#gitleaksignore cd9c0efec38c5d63053dd865e5d4e207c0760d91:docs/guides/Perform_static_analysis.md:generic-api-key:37 + bf0c77098978c450d8570b38fb480fbb8d6a0628:.github/instructions/*.instructions.md:stripe-access-token:140 diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 248c1c967..9d1a9aad1 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -31,7 +31,7 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf for cc in active_campaigns: try: - valid_items.append((cc.current_iteration.iteration_datetime, cc)) + valid_items.append((cc.current_iteration.iteration_date, cc)) except StopIteration: logger.info( "Skipping campaign ID %s as no active iteration was found.", @@ -41,15 +41,13 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf if not valid_items: latest_campaign = None else: - max_date_time = max(item[0] for item in valid_items) - cc_with_max_iteration_date: list[CampaignConfig] = [ - item[1] for item in valid_items if item[0] == max_date_time - ] + max_date = max(item[0] for item in valid_items) + cc_with_max_iteration_date: list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] if len(cc_with_max_iteration_date) > 1: err_msg = ( f"Ambiguous result: '{len(cc_with_max_iteration_date)}' active iterations " f"for target {cc_with_max_iteration_date[0].target} " - f"found for 
datetime '{max_date_time}' " + f"found for date '{max_date}' " f"across campaign(s) {[cc.id for cc in cc_with_max_iteration_date]}" ) raise ValueError(err_msg) diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index a7ce2aea3..bc6fe2693 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -33,15 +33,15 @@ from tests.integration.conftest import UNIQUE_CONSUMER_HEADER -def today() -> date: +def today() -> date: return datetime.now(UTC).date() -def yesterday() -> date: +def yesterday() -> date: return datetime.now(UTC).date() - timedelta(days=1) -def tomorrow() -> date: +def tomorrow() -> date: return datetime.now(UTC).date() + timedelta(days=1) @@ -1532,10 +1532,11 @@ def test_if_multiple_active_iterations_with_same_iteration_datetime_for_the_same ) ), ) + err_msg = ( "Ambiguous result: '2' active iterations " "for target RSV " - f"found for datetime '{previous_day} 00:00:00+00:00' " + f"found for date '{previous_day}' " "across campaign(s) ['RSV_campaign_id_1', 'RSV_campaign_id_2']" ) assert any(err_msg in message for message in caplog.messages), ( From f053d991df459ff5445cdea11b27b6e35aa8f010 Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:18:25 +0000 Subject: [PATCH 47/66] eli-537 enabling WAF blocks --- infrastructure/stacks/api-layer/waf.tf | 27 ++++++++++++------- infrastructure/stacks/api-layer/waf_alarms.tf | 26 +++++++++--------- 2 files changed, 31 insertions(+), 22 deletions(-) diff --git a/infrastructure/stacks/api-layer/waf.tf b/infrastructure/stacks/api-layer/waf.tf index 61f590b63..3a54aeb65 100644 --- a/infrastructure/stacks/api-layer/waf.tf +++ b/infrastructure/stacks/api-layer/waf.tf @@ -1,6 +1,5 @@ # WAF Web ACL for API Gateway # Only deployed in production environment for cost optimization -# Initially all rules are in COUNT mode to monitor
traffic patterns resource "aws_wafv2_web_acl" "api_gateway" { count = local.waf_enabled ? 1 : 0 @@ -19,7 +18,7 @@ resource "aws_wafv2_web_acl" "api_gateway" { priority = 10 override_action { - count {} # Start in count mode - change to none {} when ready to block + none {} } statement { @@ -43,13 +42,21 @@ resource "aws_wafv2_web_acl" "api_gateway" { priority = 20 override_action { - count {} # Start in count mode - change to none {} when ready to block + none {} } statement { managed_rule_group_statement { vendor_name = "AWS" name = "AWSManagedRulesCommonRuleSet" + + # Override NoUserAgent_Header to count only - APIM health checks send no User-Agent + rule_action_override { + name = "NoUserAgent_Header" + action_to_use { + count {} + } + } } } @@ -93,7 +100,7 @@ resource "aws_wafv2_web_acl" "api_gateway" { priority = 40 action { - count {} # Start in count mode - change to block {} when ready + block {} } statement { @@ -110,15 +117,15 @@ resource "aws_wafv2_web_acl" "api_gateway" { } } - # Rule 5: Geographic Monitoring Rule - Monitor non-UK traffic (COUNT only) - # NHS-specific requirement: initially monitor requests originating from outside GB - # This rule COUNTS any request whose geo country code is not GB (does not block) + # Rule 5: Geographic Block Rule - Block non-UK traffic + # NHS-specific requirement: block requests originating from outside GB + # Defence-in-depth against stolen mTLS certificates being used from outside the UK rule { - name = "MonitorNonUK" + name = "BlockNonUK" priority = 50 action { - count {} + block {} } statement { @@ -133,7 +140,7 @@ resource "aws_wafv2_web_acl" "api_gateway" { visibility_config { cloudwatch_metrics_enabled = true - metric_name = "MonitorNonUK" + metric_name = "BlockNonUK" sampled_requests_enabled = true } } diff --git a/infrastructure/stacks/api-layer/waf_alarms.tf b/infrastructure/stacks/api-layer/waf_alarms.tf index f0550e5b3..a6827713e 100644 --- a/infrastructure/stacks/api-layer/waf_alarms.tf +++ 
b/infrastructure/stacks/api-layer/waf_alarms.tf @@ -129,14 +129,14 @@ resource "aws_cloudwatch_metric_alarm" "waf_rate_limit_blocks" { ) } -# Alarm for non-UK rate limit violations +# Alarm for blocked non-UK requests resource "aws_cloudwatch_metric_alarm" "waf_non_uk_counted" { count = local.waf_enabled ? 1 : 0 - alarm_name = "WAF-NonUK-CountedRequests-${local.workspace}" - alarm_description = "Alerts when non-UK requests are observed (COUNT mode) by geo rule" + alarm_name = "WAF-NonUK-BlockedRequests-${local.workspace}" + alarm_description = "Alerts when non-UK requests are blocked by geo rule - may indicate stolen mTLS cert use from outside UK" comparison_operator = "GreaterThanThreshold" evaluation_periods = 2 - metric_name = "CountedRequests" + metric_name = "BlockedRequests" namespace = "AWS/WAFV2" period = 300 statistic = "Sum" @@ -145,7 +145,7 @@ resource "aws_cloudwatch_metric_alarm" "waf_non_uk_counted" { dimensions = { Region = var.default_aws_region - Rule = "MonitorNonUK" + Rule = "BlockNonUK" WebACL = aws_wafv2_web_acl.api_gateway[0].name } @@ -154,8 +154,8 @@ resource "aws_cloudwatch_metric_alarm" "waf_non_uk_counted" { tags = merge( local.tags, { - Name = "WAF-NonUK-CountedRequests" - Severity = "medium" + Name = "WAF-NonUK-BlockedRequests" + Severity = "high" Environment = var.environment } ) @@ -192,19 +192,21 @@ resource "aws_cloudwatch_metric_alarm" "waf_all_requests_high" { ) } -# Alarm for monitoring counted requests (during initial count mode) -# This helps identify if rules would block legitimate traffic +# Alarm for counted requests (NoUserAgent_Header override) +# The CRS NoUserAgent_Header sub-rule is kept in COUNT to allow the API proxy healthcheck. +# This alarm alerts if count spikes unexpectedly, which could indicate rule misconfiguration +# or unexpected traffic patterns hitting that override. resource "aws_cloudwatch_metric_alarm" "waf_counted_requests_monitoring" { count = local.waf_enabled ? 
1 : 0 alarm_name = "WAF-CountedRequests-Monitoring-${local.workspace}" - alarm_description = "Monitors requests that would be blocked if rules were active (COUNT mode)" + alarm_description = "Monitors counted requests - expected to be low volume (healthcheck NoUserAgent_Header override only)" comparison_operator = "GreaterThanThreshold" evaluation_periods = 1 metric_name = "CountedRequests" namespace = "AWS/WAFV2" period = 300 statistic = "Sum" - threshold = 100 # Alert if many requests would be blocked + threshold = 100 # Alert if count spikes beyond normal healthcheck frequency treat_missing_data = "notBreaching" dimensions = { @@ -220,7 +222,7 @@ resource "aws_cloudwatch_metric_alarm" "waf_counted_requests_monitoring" { Name = "WAF-CountedRequests-Monitoring" Severity = "low" Environment = var.environment - Purpose = "Initial monitoring during COUNT mode phase" + Purpose = "Monitor NoUserAgent_Header count override for healthcheck proxy" } ) } From 48a0e9a95bc054689f7ac02c6a3d7de6c09a674c Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:33:45 +0000 Subject: [PATCH 48/66] eli-537 deleting dev deployment --- infrastructure/stacks/api-layer/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/stacks/api-layer/variables.tf b/infrastructure/stacks/api-layer/variables.tf index 93bb4e2da..f7ac332e3 100644 --- a/infrastructure/stacks/api-layer/variables.tf +++ b/infrastructure/stacks/api-layer/variables.tf @@ -13,7 +13,7 @@ variable "SPLUNK_HEC_ENDPOINT" { variable "waf_enabled_environments" { type = list(string) description = "Environments in which WAF resources are deployed. Adjust to disable in test after evaluation." 
- default = ["dev", "preprod", "prod"] + default = ["preprod", "prod"] } variable "OPERATOR_EMAILS" { From a966f73e561064491a536b32f05f3b1bffa9063b Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Mon, 2 Mar 2026 15:24:58 +0000 Subject: [PATCH 49/66] eli-537 added US to permitted geos, for preprod only, to allow github action tests to still flow --- infrastructure/stacks/api-layer/waf.tf | 7 ++++--- infrastructure/stacks/api-layer/waf_alarms.tf | 2 ++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/infrastructure/stacks/api-layer/waf.tf b/infrastructure/stacks/api-layer/waf.tf index 3a54aeb65..194f9f591 100644 --- a/infrastructure/stacks/api-layer/waf.tf +++ b/infrastructure/stacks/api-layer/waf.tf @@ -118,8 +118,9 @@ resource "aws_wafv2_web_acl" "api_gateway" { } # Rule 5: Geographic Block Rule - Block non-UK traffic - # NHS-specific requirement: block requests originating from outside GB - # Defence-in-depth against stolen mTLS certificates being used from outside the UK + # Blocks requests from outside the allowed country list. + # In prod: GB only - all legitimate traffic must originate from within the UK + # In preprod: GB + US - GitHub Actions integration tests run from US-based servers rule { name = "BlockNonUK" priority = 50 @@ -132,7 +133,7 @@ resource "aws_wafv2_web_acl" "api_gateway" { not_statement { statement { geo_match_statement { - country_codes = ["GB"] # United Kingdom only (does NOT include Crown Dependencies) + country_codes = var.environment == "preprod" ? 
["GB", "US"] : ["GB"] } } } diff --git a/infrastructure/stacks/api-layer/waf_alarms.tf b/infrastructure/stacks/api-layer/waf_alarms.tf index a6827713e..be76924a9 100644 --- a/infrastructure/stacks/api-layer/waf_alarms.tf +++ b/infrastructure/stacks/api-layer/waf_alarms.tf @@ -130,6 +130,8 @@ resource "aws_cloudwatch_metric_alarm" "waf_rate_limit_blocks" { } # Alarm for blocked non-UK requests +# In preprod US is also allowed (for GitHub Actions), so this alarm fires on traffic +# from countries outside GB+US. In prod it fires on anything outside GB. resource "aws_cloudwatch_metric_alarm" "waf_non_uk_counted" { count = local.waf_enabled ? 1 : 0 alarm_name = "WAF-NonUK-BlockedRequests-${local.workspace}" From fe2cbffafc4d7470a1d2467a9b81378bd7232d57 Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Tue, 3 Mar 2026 09:57:46 +0000 Subject: [PATCH 50/66] eli-537 amending rate limit --- infrastructure/stacks/api-layer/waf.tf | 2 +- infrastructure/stacks/api-layer/waf_alarms.tf | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/infrastructure/stacks/api-layer/waf.tf b/infrastructure/stacks/api-layer/waf.tf index 194f9f591..726e259a6 100644 --- a/infrastructure/stacks/api-layer/waf.tf +++ b/infrastructure/stacks/api-layer/waf.tf @@ -105,7 +105,7 @@ resource "aws_wafv2_web_acl" "api_gateway" { statement { rate_based_statement { - limit = 2000 # Requests per 5-minute period per IP + limit = 300000 # 1000 TPS - we should tie this to other rate limits aggregate_key_type = "IP" } } diff --git a/infrastructure/stacks/api-layer/waf_alarms.tf b/infrastructure/stacks/api-layer/waf_alarms.tf index be76924a9..390e7822b 100644 --- a/infrastructure/stacks/api-layer/waf_alarms.tf +++ b/infrastructure/stacks/api-layer/waf_alarms.tf @@ -98,17 +98,20 @@ resource "aws_cloudwatch_metric_alarm" "waf_bad_inputs_blocks" { } # Alarm for rate limit violations (overall) +# Rate limit is set to 300,000 req/5min (1000 TPS 
headroom over 500 TPS peak). +# Any block at this threshold is a serious incident - a single IP would need to exceed +# 300k requests in 5 minutes, which indicates a runaway or compromised proxy. resource "aws_cloudwatch_metric_alarm" "waf_rate_limit_blocks" { count = local.waf_enabled ? 1 : 0 alarm_name = "WAF-RateLimit-Blocks-${local.workspace}" - alarm_description = "Alerts when requests are rate-limited (potential DDoS)" + alarm_description = "Alerts when requests are rate-limited - at 300k/5min limit this indicates a runaway or compromised proxy" comparison_operator = "GreaterThanThreshold" - evaluation_periods = 2 + evaluation_periods = 1 metric_name = "BlockedRequests" namespace = "AWS/WAFV2" period = 300 statistic = "Sum" - threshold = 50 # Alert after 50 rate-limited requests + threshold = 1 # Any block at this limit is a serious incident treat_missing_data = "notBreaching" dimensions = { @@ -174,7 +177,7 @@ resource "aws_cloudwatch_metric_alarm" "waf_all_requests_high" { namespace = "AWS/WAFV2" period = 300 statistic = "Sum" - threshold = 10000 # Adjust based on expected traffic + threshold = 300000 # 2x peak (500 TPS = 150k/5min); alert above 300k/5min treat_missing_data = "notBreaching" dimensions = { From deb6ba180e77d5d4399c62898ad765a272dacdce Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Tue, 3 Mar 2026 10:03:47 +0000 Subject: [PATCH 51/66] eli-537 minor changes based on initial review --- infrastructure/stacks/api-layer/variables.tf | 2 +- infrastructure/stacks/api-layer/waf.tf | 2 -- infrastructure/stacks/api-layer/waf_alarms.tf | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/infrastructure/stacks/api-layer/variables.tf b/infrastructure/stacks/api-layer/variables.tf index f7ac332e3..ab76f7185 100644 --- a/infrastructure/stacks/api-layer/variables.tf +++ b/infrastructure/stacks/api-layer/variables.tf @@ -12,7 +12,7 @@ variable "SPLUNK_HEC_ENDPOINT" { # WAF deployment 
environments (list of environment names where WAF should be deployed) variable "waf_enabled_environments" { type = list(string) - description = "Environments in which WAF resources are deployed. Adjust to disable in test after evaluation." + description = "Environments in which WAF resources are deployed" default = ["preprod", "prod"] } diff --git a/infrastructure/stacks/api-layer/waf.tf b/infrastructure/stacks/api-layer/waf.tf index 726e259a6..7fff9eeb7 100644 --- a/infrastructure/stacks/api-layer/waf.tf +++ b/infrastructure/stacks/api-layer/waf.tf @@ -1,6 +1,4 @@ # WAF Web ACL for API Gateway -# Only deployed in production environment for cost optimization - resource "aws_wafv2_web_acl" "api_gateway" { count = local.waf_enabled ? 1 : 0 name = "${local.workspace}-eligibility-signposting-api-waf" diff --git a/infrastructure/stacks/api-layer/waf_alarms.tf b/infrastructure/stacks/api-layer/waf_alarms.tf index 390e7822b..3ed8acba9 100644 --- a/infrastructure/stacks/api-layer/waf_alarms.tf +++ b/infrastructure/stacks/api-layer/waf_alarms.tf @@ -135,7 +135,7 @@ resource "aws_cloudwatch_metric_alarm" "waf_rate_limit_blocks" { # Alarm for blocked non-UK requests # In preprod US is also allowed (for GitHub Actions), so this alarm fires on traffic # from countries outside GB+US. In prod it fires on anything outside GB. -resource "aws_cloudwatch_metric_alarm" "waf_non_uk_counted" { +resource "aws_cloudwatch_metric_alarm" "waf_non_uk_blocked" { count = local.waf_enabled ? 1 : 0 alarm_name = "WAF-NonUK-BlockedRequests-${local.workspace}" alarm_description = "Alerts when non-UK requests are blocked by geo rule - may indicate stolen mTLS cert use from outside UK" @@ -170,7 +170,7 @@ resource "aws_cloudwatch_metric_alarm" "waf_non_uk_counted" { resource "aws_cloudwatch_metric_alarm" "waf_all_requests_high" { count = local.waf_enabled ? 
1 : 0 alarm_name = "WAF-AllRequests-High-${local.workspace}" - alarm_description = "Monitors total request volume through WAF" + alarm_description = "Monitors total allowed request volume through WAF" comparison_operator = "GreaterThanThreshold" evaluation_periods = 2 metric_name = "AllowedRequests" From 75b47b65951a88b57a453a8d07cd5ecd06855131 Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Tue, 3 Mar 2026 10:31:54 +0000 Subject: [PATCH 52/66] eli-537 removing Production from description as we also deploy to PreProd --- infrastructure/stacks/api-layer/waf.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/stacks/api-layer/waf.tf b/infrastructure/stacks/api-layer/waf.tf index 7fff9eeb7..638906a9b 100644 --- a/infrastructure/stacks/api-layer/waf.tf +++ b/infrastructure/stacks/api-layer/waf.tf @@ -2,7 +2,7 @@ resource "aws_wafv2_web_acl" "api_gateway" { count = local.waf_enabled ? 1 : 0 name = "${local.workspace}-eligibility-signposting-api-waf" - description = "WAF Web ACL for Eligibility Signposting API Gateway - Production" + description = "WAF Web ACL for Eligibility Signposting API Gateway" scope = "REGIONAL" default_action { From 7414f0ad77c564703f85518261d5735206a5f0c6 Mon Sep 17 00:00:00 2001 From: Edd Almond <102675624+eddalmond1@users.noreply.github.com> Date: Wed, 4 Mar 2026 14:48:26 +0000 Subject: [PATCH 53/66] eli-537 adding ignore for gitleaks --- .gitleaksignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitleaksignore b/.gitleaksignore index ff9cec0ef..23899810e 100644 --- a/.gitleaksignore +++ b/.gitleaksignore @@ -1,5 +1,4 @@ # SEE: https://github.com/gitleaks/gitleaks/blob/master/README.md#gitleaksignore cd9c0efec38c5d63053dd865e5d4e207c0760d91:docs/guides/Perform_static_analysis.md:generic-api-key:37 - bf0c77098978c450d8570b38fb480fbb8d6a0628:.github/instructions/*.instructions.md:stripe-access-token:140 From 84d4b6bcbba967a5c49d64b0248ecffac299b236 Mon Sep 17 
00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 09:56:25 +0000 Subject: [PATCH 54/66] Bump authlib from 1.6.6 to 1.6.7 Bumps [authlib](https://github.com/authlib/authlib) from 1.6.6 to 1.6.7. - [Release notes](https://github.com/authlib/authlib/releases) - [Changelog](https://github.com/authlib/authlib/blob/main/docs/changelog.rst) - [Commits](https://github.com/authlib/authlib/compare/v1.6.6...v1.6.7) --- updated-dependencies: - dependency-name: authlib dependency-version: 1.6.7 dependency-type: indirect ... Signed-off-by: dependabot[bot] --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3eb047897..20d757689 100644 --- a/poetry.lock +++ b/poetry.lock @@ -239,14 +239,14 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a [[package]] name = "authlib" -version = "1.6.6" +version = "1.6.7" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd"}, - {file = "authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e"}, + {file = "authlib-1.6.7-py2.py3-none-any.whl", hash = "sha256:c637340d9a02789d2efa1d003a7437d10d3e565237bcb5fcbc6c134c7b95bab0"}, + {file = "authlib-1.6.7.tar.gz", hash = "sha256:dbf10100011d1e1b34048c9d120e83f13b35d69a826ae762b93d2fb5aafc337b"}, ] [package.dependencies] From 10c66a479ccba890c6b782222a0c30ba11f5d74b Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Fri, 27 Feb 2026 18:48:47 +0000 Subject: [PATCH 55/66] ELI-615 | campaign having recent - active start_date supersedes the others sharing same best-status --- .../calculators/eligibility_calculator.py | 88 +++-- .../in_process/test_eligibility_endpoint.py | 304 ++++++++---------- 2 files changed, 194 insertions(+), 198 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 9f83bd916..2a071b2bc 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -11,12 +11,12 @@ from eligibility_signposting_api.audit.audit_context import AuditContext from eligibility_signposting_api.model import campaign_config, eligibility_status from eligibility_signposting_api.model.eligibility_status import ( + BestIterationResult, CohortGroupResult, Condition, ConditionName, EligibilityStatus, IterationResult, - IterationResultSummary, Reason, Status, StatusText, @@ -32,6 +32,7 @@ from eligibility_signposting_api.model.campaign_config import ( CampaignConfig, CohortLabel, + IterationName, ) from 
eligibility_signposting_api.model.person import Person @@ -80,32 +81,31 @@ def get_the_best_cohort_memberships( return best_status, best_cohorts - def get_eligibility_status( - self, include_actions: str, conditions: list[str], requested_category: str - ) -> EligibilityStatus: + def get_eligibility_status(self, include_actions: str, conditions: list[str], category: str) -> EligibilityStatus: include_actions_flag = include_actions.upper() == "Y" condition_results: dict[ConditionName, IterationResult] = {} final_result = [] - requested_cc_with_active_iteration = ( - self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - self.campaign_configs, conditions, requested_category - ) + requested_grouped_campaigns = self.campaign_evaluator.get_requested_grouped_campaigns( + self.campaign_configs, conditions, category ) - for condition_name, campaign in requested_cc_with_active_iteration: - iteration_result_summary = self.evaluate_iteration_result_summary(campaign) + for condition_name, campaign_group in requested_grouped_campaigns: + best_iteration_result = self.get_best_iteration_result(campaign_group) + + if best_iteration_result is None: + continue matched_action_detail = self.action_rule_handler.get_actions( self.person, - iteration_result_summary.active_iteration, - iteration_result_summary.iteration_result, + best_iteration_result.active_iteration, + best_iteration_result.iteration_result, include_actions_flag=include_actions_flag, ) - iteration_result_summary = TokenProcessor.find_and_replace_tokens(self.person, iteration_result_summary) + best_iteration_result = TokenProcessor.find_and_replace_tokens(self.person, best_iteration_result) matched_action_detail = TokenProcessor.find_and_replace_tokens(self.person, matched_action_detail) - condition_results[condition_name] = iteration_result_summary.iteration_result + condition_results[condition_name] = best_iteration_result.iteration_result condition_results[condition_name].actions = 
matched_action_detail.actions condition: Condition = self.build_condition( @@ -116,35 +116,57 @@ def get_eligibility_status( AuditContext.append_audit_condition( condition_name, - iteration_result_summary, + best_iteration_result, matched_action_detail, ) # Consolidate all the results and return return eligibility_status.EligibilityStatus(conditions=final_result) - def evaluate_iteration_result_summary( - self, campaign_with_active_iteration: CampaignConfig - ) -> IterationResultSummary: - active_iteration = campaign_with_active_iteration.current_iteration - cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( - self.person, active_iteration - ) + def get_best_iteration_result(self, campaign_group: list[CampaignConfig]) -> BestIterationResult | None: + sorted_campaigns = sorted(campaign_group, key=lambda c: c.start_date, reverse=True) - # Determine Result between cohorts - get the best - status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) - status_text = self.get_status_text( - active_iteration.status_text, ConditionName(campaign_with_active_iteration.target), status - ) + iteration_results = self.get_iteration_results(sorted_campaigns) + + if not iteration_results: + return None - return IterationResultSummary( - IterationResult(status, status_text, best_cohorts, []), - active_iteration, - campaign_with_active_iteration.id, - campaign_with_active_iteration.version, - cohort_results, + (_best_iteration_name, best_iteration_result) = max( + iteration_results.items(), + key=lambda item: next(iter(item[1].cohort_results.values())).status.value + # Below handles the case where there are no cohort results + if item[1].cohort_results + else -1, ) + return best_iteration_result + + def get_iteration_results(self, campaign_group: list[CampaignConfig]) -> dict[IterationName, BestIterationResult]: + iteration_results: dict[IterationName, BestIterationResult] = {} + + for cc in campaign_group: + try: + 
active_iteration = cc.current_iteration + except StopIteration: + logger.info("Skipping campaign ID %s as no active iteration was found.", cc.id) + continue + cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( + self.person, active_iteration + ) + + # Determine Result between cohorts - get the best + status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) + status_text = self.get_status_text(active_iteration.status_text, ConditionName(cc.target), status) + + iteration_results[active_iteration.name] = BestIterationResult( + IterationResult(status, status_text, best_cohorts, []), + active_iteration, + cc.id, + cc.version, + cohort_results, + ) + return iteration_results + @staticmethod def get_status_text( status_text: campaign_config.StatusText | None, condition_name: ConditionName, status: Status diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index bc6fe2693..e0cc9d9d5 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1,5 +1,5 @@ import json -from datetime import UTC, date, datetime, timedelta +from datetime import date, timedelta, datetime, timezone from http import HTTPStatus import pytest @@ -17,13 +17,7 @@ has_key, ) -from eligibility_signposting_api.model.campaign_config import ( - CampaignConfig, - RuleAttributeLevel, - RuleComparator, - RuleOperator, - RuleType, -) +from eligibility_signposting_api.model.campaign_config import CampaignConfig, RuleComparator from eligibility_signposting_api.model.consumer_mapping import ConsumerId, ConsumerMapping from eligibility_signposting_api.model.eligibility_status import ( NHSNumber, @@ -32,18 +26,7 @@ from tests.fixtures.builders.model import rule from tests.integration.conftest import UNIQUE_CONSUMER_HEADER - -def today(): - return datetime.now(UTC).date() - - -def yesterday(): 
- return datetime.now(UTC).date() - timedelta(days=1) - - -def tomorrow(): - return datetime.now(UTC).date() + timedelta(days=1) - +today = lambda: datetime.now(timezone.utc).date() class TestBaseLine: def test_nhs_number_given( @@ -1210,14 +1193,14 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no [ ( [ - # Creates campaign configs by [target, campaign id, iteration status, iteration date] - ("RSV", "RSV_campaign_id_1", "active", today()), - ("RSV", "RSV_campaign_id_2", "active", today()), - ("RSV", "RSV_campaign_id_3", "active", today()), - ("RSV", "RSV_campaign_id_4", "active", yesterday()), - # inactive iteration - ("RSV", "inactive_RSV_campaign_id_5", "inactive", tomorrow()), - ("RSV", "RSV_campaign_id_6", "active", today()), + # Campaign configs in S3 + # Note: Configs are uploaded in order so the start date would be newer down the order. + ("RSV", "RSV_campaign_id_1"), + ("RSV", "RSV_campaign_id_2"), + ("RSV", "RSV_campaign_id_4"), + ("RSV", "RSV_campaign_id_3"), + ("RSV", "inactive_RSV_campaign_id_5", "inactive"), # inactive iteration + ("RSV", "RSV_campaign_id_6"), ], { # Consumer mappings in S3 @@ -1243,7 +1226,7 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no ], indirect=["campaign_configs", "consumer_mappings"], ) - def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( # noqa : PLR0913 + def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( self, client: FlaskClient, persisted_person: NHSNumber, @@ -1278,60 +1261,158 @@ def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_e else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) + def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target( # noqa : PLR0913 + self, + client: FlaskClient, + persisted_person_pc_sw19: NHSNumber, + 
s3_client: BaseClient, + consumer_mapping_bucket: BucketName, + rules_bucket: BucketName, + secretsmanager_client: BaseClient, # noqa: ARG002 + ): + # Given + consumer_id = "consumer-n3bs-jo4hn-ce4na" + headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} + + # Consumer Mapping Data + s3_client.put_object( + Bucket=consumer_mapping_bucket, + Key="consumer_mapping_config.json", + Body=json.dumps( + { + consumer_id: [ + {"CampaignConfigID": "RSV_campaign_id_not_actionable"}, + {"CampaignConfigID": "RSV_campaign_id_actionable"}, + ], + } + ), + ContentType="application/json", + ) + + # Campaign configs + campaign_1 = rule.CampaignConfigFactory.build( + id="RSV_campaign_id_not_actionable", + target="RSV", + type="V", + iterations=[ + rule.IterationFactory.build( + iteration_rules=[ + rule.PostcodeSuppressionRuleFactory.build(name="Exclude SW19", description=""), + ], + iteration_cohorts=[ + rule.IterationCohortFactory.build( + cohort_label="cohort1", + cohort_group="cohort_group1", + positive_description="positive_description", + ) + ], + status_text=None, + ) + ], + ) + + campaign_2 = rule.CampaignConfigFactory.build( + id="RSV_campaign_id_actionable", + target="RSV", + type="V", + iterations=[ + rule.IterationFactory.build( + iteration_rules=[ + rule.PostcodeSuppressionRuleFactory.build(name="Exclude M4", comparator=RuleComparator("M4")), + ], + iteration_cohorts=[ + rule.IterationCohortFactory.build( + cohort_label="cohort1", + cohort_group="cohort_group1", + positive_description="positive_description", + ) + ], + status_text=None, + ) + ], + ) + + for campaign in [campaign_1, campaign_2]: + s3_client.put_object( + Bucket=rules_bucket, + Key=f"{campaign.id}.json", + Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), + ContentType="application/json", + ) + + # When + response = client.get(f"/patient-check/{persisted_person_pc_sw19}?includeActions=Y", headers=headers) + + # Then + assert_that( + 
response, + is_response() + .with_status_code(HTTPStatus.OK) + .and_text( + is_json_that( + has_entry( + "processedSuggestions", + equal_to( + [ + { + "condition": "RSV", + "status": "Actionable", + "eligibilityCohorts": [ + { + "cohortCode": "cohort_group1", + "cohortStatus": "Actionable", + "cohortText": "positive_description", + } + ], + "actions": [ + { + "actionCode": "action_code", + "actionType": "defaultcomms", + "description": "", + "urlLabel": "", + "urlLink": "", + } + ], + "suitabilityRules": [], + "statusText": "You should have the RSV vaccine", + } + ] + ), + ) + ) + ), + ) + @pytest.mark.parametrize( - ( - "campaign_1_start_date", - "campaign_2_start_date", - "postcode_for_comparator", - "cohort_for_comparator", - "expected_campaign_id", - ), + ("campaign_1_start_date", "campaign_2_start_date", "postcode_for_comparator", "expected_campaign_id"), [ ( ("RSV_campaign_id_1", today()), ("RSV_campaign_id_2", today() - timedelta(days=1)), - "SW19", # postcode for resulting in not-actionable (used by the suppression rule) - "cohort2", + "SW19", # postcode for resulting in not-actionable "RSV_campaign_id_1", ), ( ("RSV_campaign_id_1", today() - timedelta(days=1)), ("RSV_campaign_id_2", today()), "SW19", # postcode for resulting in not-actionable - "cohort2", "RSV_campaign_id_2", ), ( ("RSV_campaign_id_1", today()), ("RSV_campaign_id_2", today() - timedelta(days=1)), "M4", # postcode for resulting in actionable - "cohort2", "RSV_campaign_id_1", ), ( ("RSV_campaign_id_1", today() - timedelta(days=1)), ("RSV_campaign_id_2", today()), "M4", # postcode for resulting in actionable - "cohort2", - "RSV_campaign_id_2", - ), - ( - ("RSV_campaign_id_1", today()), - ("RSV_campaign_id_2", today() - timedelta(days=1)), - "M4", # cohort for resulting in not-eligible - "cohort1", - "RSV_campaign_id_1", - ), - ( - ("RSV_campaign_id_1", today() - timedelta(days=1)), - ("RSV_campaign_id_2", today()), - "M4", - "cohort1", # cohort for resulting in not-eligible (used by the 
filter rule) "RSV_campaign_id_2", ), ], ) - def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaign_with_diff_iteration_date( # noqa: PLR0913 + def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( self, client: FlaskClient, persisted_person_pc_sw19: NHSNumber, @@ -1343,7 +1424,6 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig campaign_1_start_date: tuple[str, date], campaign_2_start_date: tuple[str, date], postcode_for_comparator: str, - cohort_for_comparator: str, expected_campaign_id: NHSNumber, ): # Given @@ -1366,22 +1446,14 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig ) # Campaign configs - campaign_1 = rule.RawCampaignConfigFactory.build( + campaign_1 = rule.CampaignConfigFactory.build( id=campaign_1_start_date[0], target="RSV", start_date=campaign_1_start_date[1], type="V", iterations=[ rule.IterationFactory.build( - iteration_date=campaign_1_start_date[1], iteration_rules=[ - rule.IterationRuleFactory.build( - type=RuleType.filter, - name="Exclude if cohort matches", - attribute_level=RuleAttributeLevel.COHORT, - comparator=RuleComparator(cohort_for_comparator), - operator=RuleOperator.member_of, - ), rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) ), @@ -1398,14 +1470,13 @@ def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig ], ) - campaign_2 = rule.RawCampaignConfigFactory.build( + campaign_2 = rule.CampaignConfigFactory.build( id=campaign_2_start_date[0], target="RSV", type="V", start_date=campaign_2_start_date[1], iterations=[ rule.IterationFactory.build( - iteration_date=campaign_2_start_date[1], iteration_rules=[ rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) @@ -1445,100 +1516,3 @@ def 
test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaig assert_that(audit_data["response"]["condition"][0].get("campaignId"), equal_to(expected_campaign_id)) else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) - - def test_if_multiple_active_iterations_with_same_iteration_datetime_for_the_same_target_throws_internal_error( # noqa: PLR0913 - self, - client: FlaskClient, - persisted_person_pc_sw19: NHSNumber, - s3_client: BaseClient, - consumer_mapping_bucket: BucketName, - rules_bucket: BucketName, - secretsmanager_client: BaseClient, # noqa: ARG002 - caplog, - ): - # Given - consumer_id = "consumer-n3bs-jo4hn-ce4na" - headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} - - # Consumer Mapping Data - s3_client.put_object( - Bucket=consumer_mapping_bucket, - Key="consumer_mapping_config.json", - Body=json.dumps( - { - consumer_id: [ - {"CampaignConfigID": "RSV_campaign_id_1"}, - {"CampaignConfigID": "RSV_campaign_id_2"}, - ], - } - ), - ContentType="application/json", - ) - previous_day = yesterday() - # Campaign configs - campaign_1 = rule.RawCampaignConfigFactory.build( - id="RSV_campaign_id_1", - target="RSV", - start_date=previous_day, - type="V", - iterations=[rule.IterationFactory.build(iteration_date=previous_day)], - ) - - campaign_2 = rule.RawCampaignConfigFactory.build( - id="RSV_campaign_id_2", - target="RSV", - start_date=previous_day, - type="V", - iterations=[rule.IterationFactory.build(iteration_date=previous_day)], - ) - - for campaign in [campaign_1, campaign_2]: - s3_client.put_object( - Bucket=rules_bucket, - Key=f"{campaign.id}.json", - Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), - ContentType="application/json", - ) - - # When - response = client.get(f"/patient-check/{persisted_person_pc_sw19}", headers=headers) - - assert_that( - response, - is_response() - .with_status_code(HTTPStatus.INTERNAL_SERVER_ERROR) - 
.with_headers(has_entries({"Content-Type": "application/fhir+json"})) - .and_text( - is_json_that( - has_entries( - resourceType="OperationOutcome", - issue=contains_exactly( - has_entries( - severity="error", - code="processing", - diagnostics="An unexpected error occurred.", - details={ - "coding": [ - { - "system": "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", - "code": "INTERNAL_SERVER_ERROR", - "display": "An unexpected internal server error occurred.", - } - ] - }, - ) - ), - ) - ) - ), - ) - - err_msg = ( - "Ambiguous result: '2' active iterations " - "for target RSV " - f"found for date '{previous_day}' " - "across campaign(s) ['RSV_campaign_id_1', 'RSV_campaign_id_2']" - ) - assert any(err_msg in message for message in caplog.messages), ( - f"Expected log message not found. Logged messages: {caplog.messages}" - ) From e72c9c3a2f774d1a3a21f91e7e232eb2effa4e55 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:03:23 +0000 Subject: [PATCH 56/66] ELI-615 | wip --- .../calculators/eligibility_calculator.py | 6 +- .../services/processors/campaign_evaluator.py | 64 ++++--------- .../processors/test_campaign_evaluator.py | 96 +++++++------------ 3 files changed, 53 insertions(+), 113 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 2a071b2bc..bf880a24b 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -81,13 +81,13 @@ def get_the_best_cohort_memberships( return best_status, best_cohorts - def get_eligibility_status(self, include_actions: str, conditions: list[str], category: str) -> EligibilityStatus: + def get_eligibility_status(self, include_actions: str, conditions: list[str], requested_category: str) -> 
EligibilityStatus: include_actions_flag = include_actions.upper() == "Y" condition_results: dict[ConditionName, IterationResult] = {} final_result = [] - requested_grouped_campaigns = self.campaign_evaluator.get_requested_grouped_campaigns( - self.campaign_configs, conditions, category + requested_grouped_campaigns = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + self.campaign_configs, conditions, requested_category ) for condition_name, campaign_group in requested_grouped_campaigns: best_iteration_result = self.get_best_iteration_result(campaign_group) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 9d1a9aad1..2b4f7140a 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -1,4 +1,3 @@ -import logging from collections.abc import Collection, Iterator from itertools import groupby from operator import attrgetter @@ -8,8 +7,6 @@ from eligibility_signposting_api.model import eligibility_status from eligibility_signposting_api.model.campaign_config import CampaignConfig -logger = logging.getLogger(__name__) - @service class CampaignEvaluator: @@ -18,43 +15,22 @@ class CampaignEvaluator: def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> list[CampaignConfig]: return [cc for cc in campaign_configs if cc.campaign_live] - def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig | None: - """ - Returns the campaign with the latest active iteration date. - - 1. Collect all campaigns with an active iteration. - 2. Sort by iteration date (descending). - 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. 
- """ - - valid_items = [] - - for cc in active_campaigns: - try: - valid_items.append((cc.current_iteration.iteration_date, cc)) - except StopIteration: - logger.info( - "Skipping campaign ID %s as no active iteration was found.", - cc.id, - ) - - if not valid_items: - latest_campaign = None - else: - max_date = max(item[0] for item in valid_items) - cc_with_max_iteration_date: list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] - if len(cc_with_max_iteration_date) > 1: - err_msg = ( - f"Ambiguous result: '{len(cc_with_max_iteration_date)}' active iterations " - f"for target {cc_with_max_iteration_date[0].target} " - f"found for date '{max_date}' " - f"across campaign(s) {[cc.id for cc in cc_with_max_iteration_date]}" - ) - raise ValueError(err_msg) - - latest_campaign = cc_with_max_iteration_date[0] - - return latest_campaign + def get_latest_campaign(self, campaign_group: list[CampaignConfig]): + if not campaign_group: + return None + + latest_date = max(c.start_date for c in campaign_group) + + latest = [c for c in campaign_group if c.start_date == latest_date] + + if len(latest) == 1: + return latest[0] + + if len(latest) > 1: + raise ValueError( + f"Multiple campaigns share the latest start_date: {latest_date}") # TODO handle it in FHIR format + + return None def get_campaign_with_latest_active_iteration_per_target( self, campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str @@ -76,10 +52,6 @@ def get_campaign_with_latest_active_iteration_per_target( sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - filtered_campaigns = [ - c for c in campaign_group if filter_all_conditions or str(condition_name) in conditions - ] + campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] - campaign = self.get_campaign_with_latest_iteration(filtered_campaigns) - if campaign is not None: - yield (condition_name, campaign) + yield 
condition_name, self.get_latest_campaign(campaigns) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index 4a0e1330f..1cdcaf737 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -16,16 +16,16 @@ def campaign_evaluator(): @pytest.mark.parametrize( ("campaign_target", "campaign_type", "conditions_filter", "category_filter", "expected_result"), [ - ("RSV", "V", ["RSV"], "VACCINATIONS", ("RSV", "V")), - ("RSV", "V", ["COVID"], "VACCINATIONS", None), - ("RSV", "S", ["RSV"], "ALL", ("RSV", "S")), - ("RSV", "S", ["ALL"], "ALL", ("RSV", "S")), - ("RSV", "S", ["RSV"], "VACCINATIONS", None), - ("RSV", "V", ["RSV"], "ALL", ("RSV", "V")), - ("FLU", "V", ["COVID", "RSV"], "ALL", None), - ("FLU", "S", ["ALL"], "ALL", ("FLU", "S")), - ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", None), - ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", ("FLU", "V")), + ("RSV", "V", ["RSV"], "VACCINATIONS", [("RSV", "V")]), + ("RSV", "V", ["COVID"], "VACCINATIONS", []), + ("RSV", "S", ["RSV"], "ALL", [("RSV", "S")]), + ("RSV", "S", ["ALL"], "ALL", [("RSV", "S")]), + ("RSV", "S", ["RSV"], "VACCINATIONS", []), + ("RSV", "V", ["RSV"], "ALL", [("RSV", "V")]), + ("FLU", "V", ["COVID", "RSV"], "ALL", []), + ("FLU", "S", ["ALL"], "ALL", [("FLU", "S")]), + ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", []), + ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", [("FLU", "V")]), ], ) def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 @@ -33,12 +33,8 @@ def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 ): campaign = rule.CampaignConfigFactory.build(target=campaign_target, type=campaign_type) - result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - [campaign], conditions_filter, category_filter - ) - - actual = next(((str(name), camp.type) for name, camp in result if 
camp is not None), None) - assert actual == expected_result + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], conditions_filter, category_filter) + assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) def test_campaigns_grouped_by_condition_name_with_no_campaigns(campaign_evaluator): @@ -51,9 +47,7 @@ def test_campaigns_grouped_by_condition_name_with_no_active_campaigns(campaign_e target="RSV", type="V", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) ) - result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - [campaign], ["RSV"], "VACCINATIONS" - ) + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["RSV"], "VACCINATIONS") assert_that(list(result), is_([])) @@ -69,9 +63,7 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( campaign_evaluator, category_filter, campaign_type, expected_count ): campaign = rule.CampaignConfigFactory.build(target="COVID", type=campaign_type) - result = list( - campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter) - ) + result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter)) assert_that(len(result), is_(expected_count)) if expected_count > 0: assert_that(str(result[0][0]), is_("COVID")) @@ -79,71 +71,47 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campaign_evaluator): campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") - result = list( - campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") - ) - - assert_that(result, is_([])) + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") + 
assert_that(list(result), is_([])) def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_target(campaign_evaluator): - # providing the start_date here, because CampaignConfigFactory used it for iteration_date - campaign1 = rule.CampaignConfigFactory.build( - target="COVID", - type="V", - id="C1", - start_date=datetime.datetime.now(datetime.UTC).date() - datetime.timedelta(days=1), - iterations=[rule.IterationFactory.build()], - ) - campaign2 = rule.CampaignConfigFactory.build( - target="COVID", - type="V", - id="C2", - start_date=datetime.datetime.now(datetime.UTC).date(), - iterations=[rule.IterationFactory.build()], - ) + campaign1 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C1") + campaign2 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C2") campaign3 = rule.CampaignConfigFactory.build(target="FLU", type="V", id="F1") inactive_campaign = rule.CampaignConfigFactory.build( target="COVID", type="V", id="C3", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) ) all_campaigns = [campaign1, campaign2, campaign3, inactive_campaign] - result = list( - campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - all_campaigns, ["COVID", "FLU"], "VACCINATIONS" - ) - ) + result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target(all_campaigns, ["COVID", "FLU"], "VACCINATIONS")) assert_that(len(result), is_(2)) - result_dict = {str(name): campaign for name, campaign in result} - + result_dict = {str(name): campaigns for name, campaigns in result} assert_that("COVID" in result_dict) assert_that("FLU" in result_dict) - assert_that(result_dict["COVID"].id, is_(CampaignID("C2"))) - assert_that(result_dict["FLU"].id, is_(CampaignID("F1"))) + assert_that(len(result_dict["COVID"]), is_(2)) + assert_that({c.id for c in result_dict["COVID"]}, is_({CampaignID("C1"), CampaignID("C2")})) + + assert_that(len(result_dict["FLU"]), is_(1)) + 
assert_that(result_dict["FLU"][0].id, is_(CampaignID("F1"))) -def test_campaign_grouping_is_not_affected_by_order_for_mixed_types(campaign_evaluator): +def test_campaign_grouping_is_affected_by_order_for_mixed_types(campaign_evaluator): campaign_v = rule.CampaignConfigFactory.build(target="RSV", type="V") campaign_s = rule.CampaignConfigFactory.build(target="RSV", type="S") - # Order: S then V + evaluator_s_first = campaign_evaluator result_s_first = list( - campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - [campaign_s, campaign_v], ["RSV"], "VACCINATIONS" - ) + evaluator_s_first.get_campaign_with_latest_active_iteration_per_target([campaign_s, campaign_v], ["RSV"], "VACCINATIONS") ) - # Even if S is first, it is filtered out by 'allowed_types' - assert_that(len(result_s_first), is_(1)) - assert_that(result_s_first[0][1].type, is_("V")) + assert_that(result_s_first, is_([])) - # Order: V then S + evaluator_v_first = campaign_evaluator result_v_first = list( - campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - [campaign_v, campaign_s], ["RSV"], "VACCINATIONS" - ) + evaluator_v_first.get_campaign_with_latest_active_iteration_per_target([campaign_v, campaign_s], ["RSV"], "VACCINATIONS") ) assert_that(len(result_v_first), is_(1)) - assert_that(result_v_first[0][1].type, is_("V")) + assert_that(len(result_v_first[0][1]), is_(2)) From 859b2b37c2c77951baa2e75405df17bcbf2354f0 Mon Sep 17 00:00:00 2001 From: Robert Bailiff Date: Tue, 3 Mar 2026 10:26:20 +0000 Subject: [PATCH 57/66] Added vulture to workflows (#585) * Added vulture to workflows * Added new make commands and added to project * Added updated lockfile * Minimal config with no errors * Corrected vulture commands * Generating new lock file --- poetry.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/poetry.lock b/poetry.lock index 20d757689..bace7d18e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is 
automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -239,14 +239,14 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a [[package]] name = "authlib" -version = "1.6.7" +version = "1.6.6" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "authlib-1.6.7-py2.py3-none-any.whl", hash = "sha256:c637340d9a02789d2efa1d003a7437d10d3e565237bcb5fcbc6c134c7b95bab0"}, - {file = "authlib-1.6.7.tar.gz", hash = "sha256:dbf10100011d1e1b34048c9d120e83f13b35d69a826ae762b93d2fb5aafc337b"}, + {file = "authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd"}, + {file = "authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e"}, ] [package.dependencies] @@ -992,14 +992,14 @@ tzdata = "*" [[package]] name = "flask" -version = "3.1.3" +version = "3.1.2" description = "A simple framework for building complex web applications." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "flask-3.1.3-py3-none-any.whl", hash = "sha256:f4bcbefc124291925f1a26446da31a5178f9483862233b23c0c96a20701f670c"}, - {file = "flask-3.1.3.tar.gz", hash = "sha256:0ef0e52b8a9cd932855379197dd8f94047b359ca0a78695144304cb45f87c9eb"}, + {file = "flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c"}, + {file = "flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87"}, ] [package.dependencies] @@ -3468,4 +3468,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "efa2bdebc236df181c8f5fa5a9b61a84c84ae10e5e377ff94139aa2ecf390053" +content-hash = "4456e8d9141a4581c9fc2a1bda3c779fe194359c2d5a1588fe180563afb9b2b6" From dafe918349ec53cb8a2db7e8d9faabc2d62d1d40 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 3 Mar 2026 14:46:02 +0000 Subject: [PATCH 58/66] ELI-615 | campaign_configs - fixture updated | test case fixed --- .../services/processors/campaign_evaluator.py | 48 +++++++++++++------ tests/integration/conftest.py | 26 +++++----- .../in_process/test_eligibility_endpoint.py | 33 ++++++++----- 3 files changed, 68 insertions(+), 39 deletions(-) diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 2b4f7140a..7c5968a7d 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -1,3 +1,4 @@ +import logging from collections.abc import Collection, Iterator from itertools import groupby from operator import attrgetter @@ -7,6 +8,7 @@ from eligibility_signposting_api.model import eligibility_status from eligibility_signposting_api.model.campaign_config import 
CampaignConfig +logger = logging.getLogger(__name__) @service class CampaignEvaluator: @@ -15,24 +17,42 @@ class CampaignEvaluator: def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> list[CampaignConfig]: return [cc for cc in campaign_configs if cc.campaign_live] - def get_latest_campaign(self, campaign_group: list[CampaignConfig]): - if not campaign_group: - return None + def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig | None: - latest_date = max(c.start_date for c in campaign_group) + """ + Returns the campaign with the latest active iteration date. - latest = [c for c in campaign_group if c.start_date == latest_date] + 1. Collect all campaigns with an active iteration. + 2. Sort by iteration date (descending). + 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. + """ - if len(latest) == 1: - return latest[0] + valid_items = [] - if len(latest) > 1: - raise ValueError( - f"Multiple campaigns share the latest start_date: {latest_date}") # TODO handle it in FHIR format + for cc in active_campaigns: + try: + valid_items.append((cc.current_iteration.iteration_date, cc)) + except StopIteration: + logger.info( + "Skipping campaign ID %s as no active iteration was found.", + cc.id, + ) - return None + if not valid_items: + latest_campaign = None + else: + max_date = max(item[0] for item in valid_items) + cc_with_max_iteration_date:list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] + if len(cc_with_max_iteration_date) > 1: + raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " + f"for target {cc_with_max_iteration_date[0].current_iteration.iteration_date}" + f"found for date {max_date}") - def get_campaign_with_latest_active_iteration_per_target( + latest_campaign = cc_with_max_iteration_date[0] + + return latest_campaign + + def get_campaign_with_latest_active_iteration_per_target( self, 
campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig]]: mapping = { @@ -52,6 +72,6 @@ def get_campaign_with_latest_active_iteration_per_target( sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] + filtered_campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] - yield condition_name, self.get_latest_campaign(campaigns) + yield condition_name, self.get_campaign_with_latest_iteration(filtered_campaigns) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 41098728a..be2ac01cf 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -586,7 +586,7 @@ def firehose_delivery_stream(firehose_client: BaseClient, audit_bucket: BucketNa return firehose_client.describe_delivery_stream(DeliveryStreamName=stream_name) -@pytest.fixture +@pytest.fixture(scope="class") def rsv_campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -696,7 +696,7 @@ def campaign_config_with_rules_having_rule_mapper( s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture +@pytest.fixture(scope="class") def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: campaigns, campaign_data_keys = [], [] @@ -738,7 +738,7 @@ def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture +@pytest.fixture(scope="class") def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = 
rule.CampaignConfigFactory.build( target="RSV", @@ -776,7 +776,7 @@ def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketNam s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture +@pytest.fixture(scope="class") def campaign_config_with_tokens(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -843,7 +843,7 @@ def campaign_config_with_tokens(s3_client: BaseClient, rules_bucket: BucketName) s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture +@pytest.fixture(scope="class") def campaign_config_with_invalid_tokens(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -1057,7 +1057,7 @@ def campaign_config_with_custom_target_attributes( s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture +@pytest.fixture(scope="class") def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: """Create and upload multiple campaign configs to S3, then clean up after tests.""" campaigns, campaign_data_keys = [], [] @@ -1121,7 +1121,7 @@ def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture +@pytest.fixture(scope="class") def campaign_config_with_virtual_cohort(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="COVID", @@ -1144,7 +1144,7 @@ def campaign_config_with_virtual_cohort(s3_client: BaseClient, rules_bucket: Buc s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture +@pytest.fixture(scope="class") def 
campaign_config_with_missing_descriptions_missing_rule_text( s3_client: BaseClient, rules_bucket: BucketName ) -> Generator[CampaignConfig]: @@ -1265,7 +1265,7 @@ def create_and_put_consumer_mapping_in_s3( return consumer_mapping -@pytest.fixture +@pytest.fixture(scope="class") def consumer_to_active_campaign_having_invalid_tokens_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1279,7 +1279,7 @@ def consumer_to_active_campaign_having_invalid_tokens_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture +@pytest.fixture(scope="class") def consumer_to_active_campaign_having_tokens_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1293,7 +1293,7 @@ def consumer_to_active_campaign_having_tokens_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture +@pytest.fixture(scope="class") def consumer_to_active_rsv_campaign_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1307,7 +1307,7 @@ def consumer_to_active_rsv_campaign_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture +@pytest.fixture(scope="function") def consumer_to_active_campaign_having_and_rule_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1456,7 +1456,7 @@ def consumer_to_campaign_having_inactive_iteration_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture +@pytest.fixture(scope="class") def consumer_to_multiple_campaign_configs_mapping( multiple_campaign_configs: list[CampaignConfig], consumer_id: ConsumerId, diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index e0cc9d9d5..466f0e575 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ 
b/tests/integration/in_process/test_eligibility_endpoint.py @@ -1,5 +1,5 @@ import json -from datetime import date, timedelta, datetime, timezone +from datetime import UTC, date, datetime, timedelta from http import HTTPStatus import pytest @@ -26,7 +26,16 @@ from tests.fixtures.builders.model import rule from tests.integration.conftest import UNIQUE_CONSUMER_HEADER -today = lambda: datetime.now(timezone.utc).date() + +def today(): + return datetime.now(UTC).date() + +def yesterday(): + return datetime.now(UTC).date()- timedelta(days=1) + +def tomorrow(): + return datetime.now(UTC).date()+ timedelta(days=1) + class TestBaseLine: def test_nhs_number_given( @@ -1193,14 +1202,14 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no [ ( [ - # Campaign configs in S3 - # Note: Configs are uploaded in order so the start date would be newer down the order. - ("RSV", "RSV_campaign_id_1"), - ("RSV", "RSV_campaign_id_2"), - ("RSV", "RSV_campaign_id_4"), - ("RSV", "RSV_campaign_id_3"), - ("RSV", "inactive_RSV_campaign_id_5", "inactive"), # inactive iteration - ("RSV", "RSV_campaign_id_6"), + # Creates campaign configs by [target, campaign id, iteration status, iteration date] + ("RSV", "RSV_campaign_id_1", "active", today()), + ("RSV", "RSV_campaign_id_2", "active",today()), + ("RSV", "RSV_campaign_id_3", "active", today()), + ("RSV", "RSV_campaign_id_4", "active", yesterday()), + # inactive iteration + ("RSV", "inactive_RSV_campaign_id_5", "inactive", tomorrow()), + ("RSV", "RSV_campaign_id_6", "active", today()), ], { # Consumer mappings in S3 @@ -1226,7 +1235,7 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no ], indirect=["campaign_configs", "consumer_mappings"], ) - def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( + def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_exists_per_target_giving_same_status( # noqa : 
PLR0913 self, client: FlaskClient, persisted_person: NHSNumber, @@ -1412,7 +1421,7 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa ), ], ) - def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( + def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( # noqa : PLR0913 self, client: FlaskClient, persisted_person_pc_sw19: NHSNumber, From 73d584bf9b35b8b9f5ca81a529f00d046e69e75c Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Wed, 4 Mar 2026 10:56:37 +0000 Subject: [PATCH 59/66] ELI-615 | renamed best_iteration_result to iteration_result_summary --- .../calculators/eligibility_calculator.py | 89 ++++++++----------- 1 file changed, 35 insertions(+), 54 deletions(-) diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index bf880a24b..470c8a860 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -11,12 +11,12 @@ from eligibility_signposting_api.audit.audit_context import AuditContext from eligibility_signposting_api.model import campaign_config, eligibility_status from eligibility_signposting_api.model.eligibility_status import ( - BestIterationResult, CohortGroupResult, Condition, ConditionName, EligibilityStatus, IterationResult, + IterationResultSummary, Reason, Status, StatusText, @@ -32,7 +32,6 @@ from eligibility_signposting_api.model.campaign_config import ( CampaignConfig, CohortLabel, - IterationName, ) from eligibility_signposting_api.model.person import Person @@ -81,31 +80,35 @@ def get_the_best_cohort_memberships( return best_status, best_cohorts - def get_eligibility_status(self, include_actions: str, 
conditions: list[str], requested_category: str) -> EligibilityStatus: + def get_eligibility_status( + self, include_actions: str, conditions: list[str], requested_category: str + ) -> EligibilityStatus: include_actions_flag = include_actions.upper() == "Y" condition_results: dict[ConditionName, IterationResult] = {} final_result = [] - requested_grouped_campaigns = self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( - self.campaign_configs, conditions, requested_category + requested_cc_with_active_iteration = ( + self.campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + self.campaign_configs, conditions, requested_category + ) ) - for condition_name, campaign_group in requested_grouped_campaigns: - best_iteration_result = self.get_best_iteration_result(campaign_group) + for condition_name, campaign in requested_cc_with_active_iteration: + if campaign is None: + continue # skipping as no active iteration was found. - if best_iteration_result is None: - continue + iteration_result_summary = self.evaluate_iteration_result_summary(campaign) matched_action_detail = self.action_rule_handler.get_actions( self.person, - best_iteration_result.active_iteration, - best_iteration_result.iteration_result, + iteration_result_summary.active_iteration, + iteration_result_summary.iteration_result, include_actions_flag=include_actions_flag, ) - best_iteration_result = TokenProcessor.find_and_replace_tokens(self.person, best_iteration_result) + iteration_result_summary = TokenProcessor.find_and_replace_tokens(self.person, iteration_result_summary) matched_action_detail = TokenProcessor.find_and_replace_tokens(self.person, matched_action_detail) - condition_results[condition_name] = best_iteration_result.iteration_result + condition_results[condition_name] = iteration_result_summary.iteration_result condition_results[condition_name].actions = matched_action_detail.actions condition: Condition = self.build_condition( @@ -116,56 +119,34 @@ 
def get_eligibility_status(self, include_actions: str, conditions: list[str], re AuditContext.append_audit_condition( condition_name, - best_iteration_result, + iteration_result_summary, matched_action_detail, ) # Consolidate all the results and return return eligibility_status.EligibilityStatus(conditions=final_result) - def get_best_iteration_result(self, campaign_group: list[CampaignConfig]) -> BestIterationResult | None: - sorted_campaigns = sorted(campaign_group, key=lambda c: c.start_date, reverse=True) - - iteration_results = self.get_iteration_results(sorted_campaigns) - - if not iteration_results: - return None - - (_best_iteration_name, best_iteration_result) = max( - iteration_results.items(), - key=lambda item: next(iter(item[1].cohort_results.values())).status.value - # Below handles the case where there are no cohort results - if item[1].cohort_results - else -1, + def evaluate_iteration_result_summary( + self, campaign_with_active_iteration: CampaignConfig + ) -> IterationResultSummary: + active_iteration = campaign_with_active_iteration.current_iteration + cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( + self.person, active_iteration ) - return best_iteration_result - - def get_iteration_results(self, campaign_group: list[CampaignConfig]) -> dict[IterationName, BestIterationResult]: - iteration_results: dict[IterationName, BestIterationResult] = {} - - for cc in campaign_group: - try: - active_iteration = cc.current_iteration - except StopIteration: - logger.info("Skipping campaign ID %s as no active iteration was found.", cc.id) - continue - cohort_results: dict[CohortLabel, CohortGroupResult] = self.rule_processor.get_cohort_group_results( - self.person, active_iteration - ) - - # Determine Result between cohorts - get the best - status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) - status_text = self.get_status_text(active_iteration.status_text, 
ConditionName(cc.target), status) + # Determine Result between cohorts - get the best + status, best_cohorts = self.get_the_best_cohort_memberships(cohort_results) + status_text = self.get_status_text( + active_iteration.status_text, ConditionName(campaign_with_active_iteration.target), status + ) - iteration_results[active_iteration.name] = BestIterationResult( - IterationResult(status, status_text, best_cohorts, []), - active_iteration, - cc.id, - cc.version, - cohort_results, - ) - return iteration_results + return IterationResultSummary( + IterationResult(status, status_text, best_cohorts, []), + active_iteration, + campaign_with_active_iteration.id, + campaign_with_active_iteration.version, + cohort_results, + ) @staticmethod def get_status_text( From eb639c02c86a64384cac8e00b9caea7487d563e0 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 12:27:32 +0000 Subject: [PATCH 60/66] ELI-674 - revert - rebasing --- .../instructions/code-review.instructions.md | 65 ---- .github/instructions/python.instructions.md | 93 ------ poetry.lock | 10 +- .../calculators/eligibility_calculator.py | 3 - .../services/processors/campaign_evaluator.py | 38 ++- tests/integration/conftest.py | 26 +- .../in_process/test_eligibility_endpoint.py | 284 +++++++++--------- .../processors/test_campaign_evaluator.py | 96 ++++-- 8 files changed, 256 insertions(+), 359 deletions(-) delete mode 100644 .github/instructions/code-review.instructions.md delete mode 100644 .github/instructions/python.instructions.md diff --git a/.github/instructions/code-review.instructions.md b/.github/instructions/code-review.instructions.md deleted file mode 100644 index 5bf9ebb4c..000000000 --- a/.github/instructions/code-review.instructions.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: "Code review instructions for the eligibility-signposting-api project" -applyTo: "**" -excludeAgent: ["coding-agent"] ---- - -# Code Review Instructions 
- -Guidelines for the eligibility-signposting-api project — a serverless AWS Lambda + Flask eligibility rules engine. - -## Review Priorities - -### Critical (block merge) - -- **Security**: Exposed secrets, PII leakage (especially NHS Numbers), missing header validation -- **Correctness**: Logic errors in rules engine evaluation, incorrect operator behaviour, data corruption in DynamoDB -- **Breaking Changes**: API contract changes to FHIR response models or request validation - -### Important (requires discussion) - -- **Code Quality**: SOLID violations, excessive duplication -- **Test Coverage**: Missing tests for critical paths, new rules/operators, or edge cases -- **Performance**: Unnecessary DynamoDB scans, missing caching, Lambda cold start regressions -- **Architecture**: Deviations from established patterns (wireup DI, chain of responsibility, operator registry) - -### Suggestion (non-blocking) - -- **Readability**: Naming, simplification of complex logic -- **Best Practices**: Minor convention deviations -- **Documentation**: Missing or incomplete docstrings - -## Security - -- **PII handling**: NHS Numbers must never appear in logs or error messages. `TokenError` messages must be redacted. Verify new log statements do not leak person data. -- **Secrets**: No API keys, tokens, or secrets in code. Use environment variables or AWS Secrets Manager. -- **NHS Number hashing**: Lookups use HMAC-SHA512 via `HashingService` with secret rotation (AWSCURRENT → AWSPREVIOUS fallback). -- **Header validation**: `NHSE-Product-ID` must be present (403 if missing). `nhs-login-nhs-number` must match path parameter. -- **Security headers**: Responses must include `Cache-Control: no-store, private`, `Strict-Transport-Security`, `X-Content-Type-Options: nosniff`. - -## Architecture - -- **Dependency injection**: Use wireup `@service` for all services, repos, and factories. Inject via `Injected[T]`, `Inject(qualifier=...)`, or `Inject(param=...)`. 
Never instantiate services manually. -- **Chain of responsibility**: Processing follows `CohortEligibilityHandler → BaseEligibilityHandler → FilterRuleHandler → SuppressionRuleHandler`. Extend this chain for new steps. -- **Operator registry**: New operators must extend `hamcrest.BaseMatcher` and register via the decorator-based `OperatorRegistry`. -- **Pydantic models**: Use `Field(alias=...)` for JSON mapping, `field_validator`/`model_validator` for validation. Response models use camelCase aliases. -- **FHIR compliance**: Error responses must use `OperationOutcome` models with `application/fhir+json` content type. -- **Lambda reuse**: The Flask app is cached in `CacheManager` across invocations. Changes to app initialization must not break container reuse. - -## Performance - -- **DynamoDB**: Use `query()` with `KeyConditionExpression`, never `scan()`. Partition key is `NHS_NUMBER`, sort key discriminator is `ATTRIBUTE_TYPE`. -- **S3 configuration loading**: Campaign configs load from S3 per request. Avoid unnecessary `list_objects` or `get_object` calls. -- **Caching**: Feature toggles use `TTLCache` (300s). New caching should follow the same pattern with appropriate TTLs. -- **Lambda cold starts**: Avoid heavy imports at module level. Keep wireup service graph lean. - -## Audit Trail - -- **Completeness**: New eligibility logic must call `AuditContext.append_audit_condition()` to record evaluation details. -- **Firehose delivery**: Audit events use Pydantic `AuditEvent` models sent to Kinesis Firehose. Preserve the full audit data model. - -## Terraform - -- **Encryption**: All AWS resources (DynamoDB, S3, Lambda, Firehose, Secrets Manager) must use KMS CMK encryption. -- **Environment parity**: Verify deletion protection and PITR are enabled for production/pre-production DynamoDB tables. -- **Safety**: Terraform changes must not destroy or replace stateful resources (DynamoDB tables, S3 buckets) unintentionally. 
diff --git a/.github/instructions/python.instructions.md b/.github/instructions/python.instructions.md deleted file mode 100644 index d5dd89b48..000000000 --- a/.github/instructions/python.instructions.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -description: "Python coding standards for the eligibility-signposting-api project" -applyTo: "**/*.py" ---- - -# Python Coding Standards - -## Naming Conventions - -- `snake_case` for functions, variables, and module names. -- `PascalCase` for class names. -- `UPPER_SNAKE_CASE` for constants. -- Prefix private methods and attributes with `_`. - -## Code Style - -- Line length limit: 120 characters (enforced by ruff). -- Use type hints for all function signatures and return types. -- Prefer `dataclass` for simple domain objects, Pydantic `BaseModel` for validated/serialized models. -- Use `StrEnum` for string enumerations. -- Avoid bare `except:` — catch specific exceptions. - -## Error Handling - -```python -# Bad: silent failure -try: - person = repo.get(nhs_number) -except Exception: - pass - -# Good: specific exceptions with context -try: - person = repo.get(nhs_number) -except PersonNotFoundError: - raise -except ClientError as e: - raise RepositoryError(f"Failed to query person table: {e}") from e -``` - -## Dependency Injection (wireup) - -- Decorate services with `@service`. Do not instantiate services manually. -- Use `Inject(qualifier=...)` for AWS client disambiguation. -- Use `Inject(param=...)` for configuration values. -- Register factory functions with `@service` for boto3 clients. - -```python -# Good -@service -class MyService: - def __init__(self, repo: Injected[MyRepo]) -> None: - self._repo = repo - -# Bad: manual instantiation -class MyService: - def __init__(self) -> None: - self._repo = MyRepo() -``` - -## Pydantic Models - -- Use `Field(alias=...)` for JSON key mapping. -- Use `field_validator` / `model_validator` for custom validation. 
-- Response models must use camelCase aliases (`alias_generator=to_camel` or explicit aliases). -- Use `model_dump(by_alias=True)` when serializing for API responses. - -## Testing - -- Use `pytest` with pyHamcrest assertions (`assert_that`, `is_`, `has_entries`, `contains_exactly`, etc.). -- Use `brunns-matchers` for Werkzeug response assertions. -- Use project auto-matchers (`BaseAutoMatcher`) for dataclass/Pydantic model assertions. -- Use `polyfactory` (`DataclassFactory` / `ModelFactory`) for test data builders. -- Mock AWS services with `moto`, not manual stubs. -- Use `@pytest.mark.parametrize` for rule/operator test cases. - -```python -# Good: pyHamcrest with specific matchers -def test_eligible_person_returns_eligible_status(): - result = evaluate(person, campaign) - assert_that(result, is_(has_property("status", equal_to(Status.ELIGIBLE)))) - -# Bad: generic assert -def test_eligible(): - result = evaluate(person, campaign) - assert result is not None -``` - -## Logging - -- Use structured JSON logging via `python-json-logger`. -- Never log NHS Numbers or other PII. -- Include `request_id` via the `ContextVar` pattern for request tracing. diff --git a/poetry.lock b/poetry.lock index bace7d18e..3eb047897 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -992,14 +992,14 @@ tzdata = "*" [[package]] name = "flask" -version = "3.1.2" +version = "3.1.3" description = "A simple framework for building complex web applications." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c"}, - {file = "flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87"}, + {file = "flask-3.1.3-py3-none-any.whl", hash = "sha256:f4bcbefc124291925f1a26446da31a5178f9483862233b23c0c96a20701f670c"}, + {file = "flask-3.1.3.tar.gz", hash = "sha256:0ef0e52b8a9cd932855379197dd8f94047b359ca0a78695144304cb45f87c9eb"}, ] [package.dependencies] @@ -3468,4 +3468,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.13" -content-hash = "4456e8d9141a4581c9fc2a1bda3c779fe194359c2d5a1588fe180563afb9b2b6" +content-hash = "efa2bdebc236df181c8f5fa5a9b61a84c84ae10e5e377ff94139aa2ecf390053" diff --git a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py index 470c8a860..9f83bd916 100644 --- a/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py +++ b/src/eligibility_signposting_api/services/calculators/eligibility_calculator.py @@ -93,9 +93,6 @@ def get_eligibility_status( ) ) for condition_name, campaign in requested_cc_with_active_iteration: - if campaign is None: - continue # skipping as no active iteration was found. 
- iteration_result_summary = self.evaluate_iteration_result_summary(campaign) matched_action_detail = self.action_rule_handler.get_actions( diff --git a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py index 7c5968a7d..248c1c967 100644 --- a/src/eligibility_signposting_api/services/processors/campaign_evaluator.py +++ b/src/eligibility_signposting_api/services/processors/campaign_evaluator.py @@ -10,6 +10,7 @@ logger = logging.getLogger(__name__) + @service class CampaignEvaluator: """Filters and groups campaign configurations.""" @@ -18,20 +19,19 @@ def get_active_campaigns(self, campaign_configs: Collection[CampaignConfig]) -> return [cc for cc in campaign_configs if cc.campaign_live] def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConfig]) -> CampaignConfig | None: - """ - Returns the campaign with the latest active iteration date. + Returns the campaign with the latest active iteration date. - 1. Collect all campaigns with an active iteration. - 2. Sort by iteration date (descending). - 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. + 1. Collect all campaigns with an active iteration. + 2. Sort by iteration date (descending). + 3. Extract the lead campaign, throwing an error if a tie for the latest date exists. 
""" valid_items = [] for cc in active_campaigns: try: - valid_items.append((cc.current_iteration.iteration_date, cc)) + valid_items.append((cc.current_iteration.iteration_datetime, cc)) except StopIteration: logger.info( "Skipping campaign ID %s as no active iteration was found.", @@ -41,18 +41,24 @@ def get_campaign_with_latest_iteration(self, active_campaigns: list[CampaignConf if not valid_items: latest_campaign = None else: - max_date = max(item[0] for item in valid_items) - cc_with_max_iteration_date:list[CampaignConfig] = [item[1] for item in valid_items if item[0] == max_date] + max_date_time = max(item[0] for item in valid_items) + cc_with_max_iteration_date: list[CampaignConfig] = [ + item[1] for item in valid_items if item[0] == max_date_time + ] if len(cc_with_max_iteration_date) > 1: - raise ValueError(f"Ambiguous result: {len(cc_with_max_iteration_date)} iterations " - f"for target {cc_with_max_iteration_date[0].current_iteration.iteration_date}" - f"found for date {max_date}") + err_msg = ( + f"Ambiguous result: '{len(cc_with_max_iteration_date)}' active iterations " + f"for target {cc_with_max_iteration_date[0].target} " + f"found for datetime '{max_date_time}' " + f"across campaign(s) {[cc.id for cc in cc_with_max_iteration_date]}" + ) + raise ValueError(err_msg) latest_campaign = cc_with_max_iteration_date[0] return latest_campaign - def get_campaign_with_latest_active_iteration_per_target( + def get_campaign_with_latest_active_iteration_per_target( self, campaign_configs: Collection[CampaignConfig], conditions: list[str], requested_category: str ) -> Iterator[tuple[eligibility_status.ConditionName, CampaignConfig]]: mapping = { @@ -72,6 +78,10 @@ def get_campaign_with_latest_active_iteration_per_target( sorted(active_campaigns, key=attrgetter("target")), key=attrgetter("target"), ): - filtered_campaigns = [c for c in allowed_campaigns if filter_all_conditions or str(condition_name) in conditions] + filtered_campaigns = [ + c for c in 
campaign_group if filter_all_conditions or str(condition_name) in conditions + ] - yield condition_name, self.get_campaign_with_latest_iteration(filtered_campaigns) + campaign = self.get_campaign_with_latest_iteration(filtered_campaigns) + if campaign is not None: + yield (condition_name, campaign) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index be2ac01cf..41098728a 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -586,7 +586,7 @@ def firehose_delivery_stream(firehose_client: BaseClient, audit_bucket: BucketNa return firehose_client.describe_delivery_stream(DeliveryStreamName=stream_name) -@pytest.fixture(scope="class") +@pytest.fixture def rsv_campaign_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -696,7 +696,7 @@ def campaign_config_with_rules_having_rule_mapper( s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: campaigns, campaign_data_keys = [], [] @@ -738,7 +738,7 @@ def inactive_iteration_config(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -776,7 +776,7 @@ def campaign_config_with_and_rule(s3_client: BaseClient, rules_bucket: BucketNam s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_tokens(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = 
rule.CampaignConfigFactory.build( target="RSV", @@ -843,7 +843,7 @@ def campaign_config_with_tokens(s3_client: BaseClient, rules_bucket: BucketName) s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_invalid_tokens(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="RSV", @@ -1057,7 +1057,7 @@ def campaign_config_with_custom_target_attributes( s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[list[CampaignConfig]]: """Create and upload multiple campaign configs to S3, then clean up after tests.""" campaigns, campaign_data_keys = [], [] @@ -1121,7 +1121,7 @@ def multiple_campaign_configs(s3_client: BaseClient, rules_bucket: BucketName) - s3_client.delete_object(Bucket=rules_bucket, Key=key) -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_virtual_cohort(s3_client: BaseClient, rules_bucket: BucketName) -> Generator[CampaignConfig]: campaign: CampaignConfig = rule.CampaignConfigFactory.build( target="COVID", @@ -1144,7 +1144,7 @@ def campaign_config_with_virtual_cohort(s3_client: BaseClient, rules_bucket: Buc s3_client.delete_object(Bucket=rules_bucket, Key=f"{campaign.name}.json") -@pytest.fixture(scope="class") +@pytest.fixture def campaign_config_with_missing_descriptions_missing_rule_text( s3_client: BaseClient, rules_bucket: BucketName ) -> Generator[CampaignConfig]: @@ -1265,7 +1265,7 @@ def create_and_put_consumer_mapping_in_s3( return consumer_mapping -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_active_campaign_having_invalid_tokens_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1279,7 +1279,7 @@ def 
consumer_to_active_campaign_having_invalid_tokens_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_active_campaign_having_tokens_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1293,7 +1293,7 @@ def consumer_to_active_campaign_having_tokens_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_active_rsv_campaign_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1307,7 +1307,7 @@ def consumer_to_active_rsv_campaign_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="function") +@pytest.fixture def consumer_to_active_campaign_having_and_rule_mapping( s3_client: BaseClient, consumer_mapping_bucket: BucketName, @@ -1456,7 +1456,7 @@ def consumer_to_campaign_having_inactive_iteration_mapping( s3_client.delete_object(Bucket=consumer_mapping_bucket, Key="consumer_mapping_config.json") -@pytest.fixture(scope="class") +@pytest.fixture def consumer_to_multiple_campaign_configs_mapping( multiple_campaign_configs: list[CampaignConfig], consumer_id: ConsumerId, diff --git a/tests/integration/in_process/test_eligibility_endpoint.py b/tests/integration/in_process/test_eligibility_endpoint.py index 466f0e575..a7ce2aea3 100644 --- a/tests/integration/in_process/test_eligibility_endpoint.py +++ b/tests/integration/in_process/test_eligibility_endpoint.py @@ -17,7 +17,13 @@ has_key, ) -from eligibility_signposting_api.model.campaign_config import CampaignConfig, RuleComparator +from eligibility_signposting_api.model.campaign_config import ( + CampaignConfig, + RuleAttributeLevel, + RuleComparator, + RuleOperator, + RuleType, +) from eligibility_signposting_api.model.consumer_mapping import ConsumerId, ConsumerMapping from 
eligibility_signposting_api.model.eligibility_status import ( NHSNumber, @@ -27,14 +33,16 @@ from tests.integration.conftest import UNIQUE_CONSUMER_HEADER -def today(): +def today() -> date: return datetime.now(UTC).date() -def yesterday(): - return datetime.now(UTC).date()- timedelta(days=1) -def tomorrow(): - return datetime.now(UTC).date()+ timedelta(days=1) +def yesterday() -> date: + return datetime.now(UTC).date() - timedelta(days=1) + + +def tomorrow() -> date: + return datetime.now(UTC).date() + timedelta(days=1) class TestBaseLine: @@ -1204,7 +1212,7 @@ def test_valid_response_when_consumer_has_a_valid_campaign_config_mapping( # no [ # Creates campaign configs by [target, campaign id, iteration status, iteration date] ("RSV", "RSV_campaign_id_1", "active", today()), - ("RSV", "RSV_campaign_id_2", "active",today()), + ("RSV", "RSV_campaign_id_2", "active", today()), ("RSV", "RSV_campaign_id_3", "active", today()), ("RSV", "RSV_campaign_id_4", "active", yesterday()), # inactive iteration @@ -1270,158 +1278,60 @@ def test_if_correct_campaign_is_chosen_for_the_consumer_when_multiple_campaign_e else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) - def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target( # noqa : PLR0913 - self, - client: FlaskClient, - persisted_person_pc_sw19: NHSNumber, - s3_client: BaseClient, - consumer_mapping_bucket: BucketName, - rules_bucket: BucketName, - secretsmanager_client: BaseClient, # noqa: ARG002 - ): - # Given - consumer_id = "consumer-n3bs-jo4hn-ce4na" - headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} - - # Consumer Mapping Data - s3_client.put_object( - Bucket=consumer_mapping_bucket, - Key="consumer_mapping_config.json", - Body=json.dumps( - { - consumer_id: [ - {"CampaignConfigID": "RSV_campaign_id_not_actionable"}, - {"CampaignConfigID": "RSV_campaign_id_actionable"}, - ], - } - ), - 
ContentType="application/json", - ) - - # Campaign configs - campaign_1 = rule.CampaignConfigFactory.build( - id="RSV_campaign_id_not_actionable", - target="RSV", - type="V", - iterations=[ - rule.IterationFactory.build( - iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(name="Exclude SW19", description=""), - ], - iteration_cohorts=[ - rule.IterationCohortFactory.build( - cohort_label="cohort1", - cohort_group="cohort_group1", - positive_description="positive_description", - ) - ], - status_text=None, - ) - ], - ) - - campaign_2 = rule.CampaignConfigFactory.build( - id="RSV_campaign_id_actionable", - target="RSV", - type="V", - iterations=[ - rule.IterationFactory.build( - iteration_rules=[ - rule.PostcodeSuppressionRuleFactory.build(name="Exclude M4", comparator=RuleComparator("M4")), - ], - iteration_cohorts=[ - rule.IterationCohortFactory.build( - cohort_label="cohort1", - cohort_group="cohort_group1", - positive_description="positive_description", - ) - ], - status_text=None, - ) - ], - ) - - for campaign in [campaign_1, campaign_2]: - s3_client.put_object( - Bucket=rules_bucket, - Key=f"{campaign.id}.json", - Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), - ContentType="application/json", - ) - - # When - response = client.get(f"/patient-check/{persisted_person_pc_sw19}?includeActions=Y", headers=headers) - - # Then - assert_that( - response, - is_response() - .with_status_code(HTTPStatus.OK) - .and_text( - is_json_that( - has_entry( - "processedSuggestions", - equal_to( - [ - { - "condition": "RSV", - "status": "Actionable", - "eligibilityCohorts": [ - { - "cohortCode": "cohort_group1", - "cohortStatus": "Actionable", - "cohortText": "positive_description", - } - ], - "actions": [ - { - "actionCode": "action_code", - "actionType": "defaultcomms", - "description": "", - "urlLabel": "", - "urlLink": "", - } - ], - "suitabilityRules": [], - "statusText": "You should have the RSV vaccine", - } - ] - ), - ) - ) - ), - ) - 
@pytest.mark.parametrize( - ("campaign_1_start_date", "campaign_2_start_date", "postcode_for_comparator", "expected_campaign_id"), + ( + "campaign_1_start_date", + "campaign_2_start_date", + "postcode_for_comparator", + "cohort_for_comparator", + "expected_campaign_id", + ), [ ( ("RSV_campaign_id_1", today()), ("RSV_campaign_id_2", today() - timedelta(days=1)), - "SW19", # postcode for resulting in not-actionable + "SW19", # postcode for resulting in not-actionable (used by the suppression rule) + "cohort2", "RSV_campaign_id_1", ), ( ("RSV_campaign_id_1", today() - timedelta(days=1)), ("RSV_campaign_id_2", today()), "SW19", # postcode for resulting in not-actionable + "cohort2", "RSV_campaign_id_2", ), ( ("RSV_campaign_id_1", today()), ("RSV_campaign_id_2", today() - timedelta(days=1)), "M4", # postcode for resulting in actionable + "cohort2", "RSV_campaign_id_1", ), ( ("RSV_campaign_id_1", today() - timedelta(days=1)), ("RSV_campaign_id_2", today()), "M4", # postcode for resulting in actionable + "cohort2", + "RSV_campaign_id_2", + ), + ( + ("RSV_campaign_id_1", today()), + ("RSV_campaign_id_2", today() - timedelta(days=1)), + "M4", # cohort for resulting in not-eligible + "cohort1", + "RSV_campaign_id_1", + ), + ( + ("RSV_campaign_id_1", today() - timedelta(days=1)), + ("RSV_campaign_id_2", today()), + "M4", + "cohort1", # cohort for resulting in not-eligible (used by the filter rule) "RSV_campaign_id_2", ), ], ) - def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campaign_per_target_diff_start_date( # noqa : PLR0913 + def test_if_cc_with_latest_active_iteration_is_chosen_if_exists_multiple_campaign_with_diff_iteration_date( # noqa: PLR0913 self, client: FlaskClient, persisted_person_pc_sw19: NHSNumber, @@ -1433,6 +1343,7 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa campaign_1_start_date: tuple[str, date], campaign_2_start_date: tuple[str, date], postcode_for_comparator: str, + cohort_for_comparator: 
str, expected_campaign_id: NHSNumber, ): # Given @@ -1455,14 +1366,22 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa ) # Campaign configs - campaign_1 = rule.CampaignConfigFactory.build( + campaign_1 = rule.RawCampaignConfigFactory.build( id=campaign_1_start_date[0], target="RSV", start_date=campaign_1_start_date[1], type="V", iterations=[ rule.IterationFactory.build( + iteration_date=campaign_1_start_date[1], iteration_rules=[ + rule.IterationRuleFactory.build( + type=RuleType.filter, + name="Exclude if cohort matches", + attribute_level=RuleAttributeLevel.COHORT, + comparator=RuleComparator(cohort_for_comparator), + operator=RuleOperator.member_of, + ), rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) ), @@ -1479,13 +1398,14 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa ], ) - campaign_2 = rule.CampaignConfigFactory.build( + campaign_2 = rule.RawCampaignConfigFactory.build( id=campaign_2_start_date[0], target="RSV", type="V", start_date=campaign_2_start_date[1], iterations=[ rule.IterationFactory.build( + iteration_date=campaign_2_start_date[1], iteration_rules=[ rule.PostcodeSuppressionRuleFactory.build( name="Exclude M4", comparator=RuleComparator(postcode_for_comparator) @@ -1525,3 +1445,99 @@ def test_if_campaign_having_best_status_is_chosen_if_there_exists_multiple_campa assert_that(audit_data["response"]["condition"][0].get("campaignId"), equal_to(expected_campaign_id)) else: assert_that(len(audit_data["response"]["condition"]), equal_to(0)) + + def test_if_multiple_active_iterations_with_same_iteration_datetime_for_the_same_target_throws_internal_error( # noqa: PLR0913 + self, + client: FlaskClient, + persisted_person_pc_sw19: NHSNumber, + s3_client: BaseClient, + consumer_mapping_bucket: BucketName, + rules_bucket: BucketName, + secretsmanager_client: BaseClient, # noqa: ARG002 + caplog, + ): + # Given + consumer_id = 
"consumer-n3bs-jo4hn-ce4na" + headers = {"nhs-login-nhs-number": str(persisted_person_pc_sw19), UNIQUE_CONSUMER_HEADER: consumer_id} + + # Consumer Mapping Data + s3_client.put_object( + Bucket=consumer_mapping_bucket, + Key="consumer_mapping_config.json", + Body=json.dumps( + { + consumer_id: [ + {"CampaignConfigID": "RSV_campaign_id_1"}, + {"CampaignConfigID": "RSV_campaign_id_2"}, + ], + } + ), + ContentType="application/json", + ) + previous_day = yesterday() + # Campaign configs + campaign_1 = rule.RawCampaignConfigFactory.build( + id="RSV_campaign_id_1", + target="RSV", + start_date=previous_day, + type="V", + iterations=[rule.IterationFactory.build(iteration_date=previous_day)], + ) + + campaign_2 = rule.RawCampaignConfigFactory.build( + id="RSV_campaign_id_2", + target="RSV", + start_date=previous_day, + type="V", + iterations=[rule.IterationFactory.build(iteration_date=previous_day)], + ) + + for campaign in [campaign_1, campaign_2]: + s3_client.put_object( + Bucket=rules_bucket, + Key=f"{campaign.id}.json", + Body=json.dumps({"CampaignConfig": campaign.model_dump(by_alias=True)}), + ContentType="application/json", + ) + + # When + response = client.get(f"/patient-check/{persisted_person_pc_sw19}", headers=headers) + + assert_that( + response, + is_response() + .with_status_code(HTTPStatus.INTERNAL_SERVER_ERROR) + .with_headers(has_entries({"Content-Type": "application/fhir+json"})) + .and_text( + is_json_that( + has_entries( + resourceType="OperationOutcome", + issue=contains_exactly( + has_entries( + severity="error", + code="processing", + diagnostics="An unexpected error occurred.", + details={ + "coding": [ + { + "system": "https://fhir.nhs.uk/STU3/ValueSet/Spine-ErrorOrWarningCode-1", + "code": "INTERNAL_SERVER_ERROR", + "display": "An unexpected internal server error occurred.", + } + ] + }, + ) + ), + ) + ) + ), + ) + err_msg = ( + "Ambiguous result: '2' active iterations " + "for target RSV " + f"found for datetime '{previous_day} 00:00:00+00:00' 
" + "across campaign(s) ['RSV_campaign_id_1', 'RSV_campaign_id_2']" + ) + assert any(err_msg in message for message in caplog.messages), ( + f"Expected log message not found. Logged messages: {caplog.messages}" + ) diff --git a/tests/unit/services/processors/test_campaign_evaluator.py b/tests/unit/services/processors/test_campaign_evaluator.py index 1cdcaf737..4a0e1330f 100644 --- a/tests/unit/services/processors/test_campaign_evaluator.py +++ b/tests/unit/services/processors/test_campaign_evaluator.py @@ -16,16 +16,16 @@ def campaign_evaluator(): @pytest.mark.parametrize( ("campaign_target", "campaign_type", "conditions_filter", "category_filter", "expected_result"), [ - ("RSV", "V", ["RSV"], "VACCINATIONS", [("RSV", "V")]), - ("RSV", "V", ["COVID"], "VACCINATIONS", []), - ("RSV", "S", ["RSV"], "ALL", [("RSV", "S")]), - ("RSV", "S", ["ALL"], "ALL", [("RSV", "S")]), - ("RSV", "S", ["RSV"], "VACCINATIONS", []), - ("RSV", "V", ["RSV"], "ALL", [("RSV", "V")]), - ("FLU", "V", ["COVID", "RSV"], "ALL", []), - ("FLU", "S", ["ALL"], "ALL", [("FLU", "S")]), - ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", []), - ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", [("FLU", "V")]), + ("RSV", "V", ["RSV"], "VACCINATIONS", ("RSV", "V")), + ("RSV", "V", ["COVID"], "VACCINATIONS", None), + ("RSV", "S", ["RSV"], "ALL", ("RSV", "S")), + ("RSV", "S", ["ALL"], "ALL", ("RSV", "S")), + ("RSV", "S", ["RSV"], "VACCINATIONS", None), + ("RSV", "V", ["RSV"], "ALL", ("RSV", "V")), + ("FLU", "V", ["COVID", "RSV"], "ALL", None), + ("FLU", "S", ["ALL"], "ALL", ("FLU", "S")), + ("COVID", "V", ["UNKNOWN"], "VACCINATIONS", None), + ("FLU", "V", ["COVID", "FLU"], "VACCINATIONS", ("FLU", "V")), ], ) def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 @@ -33,8 +33,12 @@ def test_campaigns_grouped_by_condition_name_filters_correctly( # noqa: PLR0913 ): campaign = rule.CampaignConfigFactory.build(target=campaign_target, type=campaign_type) - result = 
campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], conditions_filter, category_filter) - assert_that([(str(name), group[0].type) for name, group in result], is_(expected_result)) + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + [campaign], conditions_filter, category_filter + ) + + actual = next(((str(name), camp.type) for name, camp in result if camp is not None), None) + assert actual == expected_result def test_campaigns_grouped_by_condition_name_with_no_campaigns(campaign_evaluator): @@ -47,7 +51,9 @@ def test_campaigns_grouped_by_condition_name_with_no_active_campaigns(campaign_e target="RSV", type="V", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) ) - result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["RSV"], "VACCINATIONS") + result = campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + [campaign], ["RSV"], "VACCINATIONS" + ) assert_that(list(result), is_([])) @@ -63,7 +69,9 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( campaign_evaluator, category_filter, campaign_type, expected_count ): campaign = rule.CampaignConfigFactory.build(target="COVID", type=campaign_type) - result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter)) + result = list( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], ["COVID"], category_filter) + ) assert_that(len(result), is_(expected_count)) if expected_count > 0: assert_that(str(result[0][0]), is_("COVID")) @@ -71,47 +79,71 @@ def test_campaigns_grouped_by_condition_name_with_various_categories( def test_campaigns_grouped_by_condition_name_with_empty_conditions_filter(campaign_evaluator): campaign = rule.CampaignConfigFactory.build(target="RSV", type="V") - result = 
campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") - assert_that(list(result), is_([])) + result = list( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target([campaign], [], "VACCINATIONS") + ) + + assert_that(result, is_([])) def test_campaigns_grouped_by_condition_name_groups_multiple_campaigns_for_same_target(campaign_evaluator): - campaign1 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C1") - campaign2 = rule.CampaignConfigFactory.build(target="COVID", type="V", id="C2") + # providing the start_date here, because CampaignConfigFactory used it for iteration_date + campaign1 = rule.CampaignConfigFactory.build( + target="COVID", + type="V", + id="C1", + start_date=datetime.datetime.now(datetime.UTC).date() - datetime.timedelta(days=1), + iterations=[rule.IterationFactory.build()], + ) + campaign2 = rule.CampaignConfigFactory.build( + target="COVID", + type="V", + id="C2", + start_date=datetime.datetime.now(datetime.UTC).date(), + iterations=[rule.IterationFactory.build()], + ) campaign3 = rule.CampaignConfigFactory.build(target="FLU", type="V", id="F1") inactive_campaign = rule.CampaignConfigFactory.build( target="COVID", type="V", id="C3", start_date=datetime.date(2025, 4, 20), end_date=datetime.date(2025, 4, 21) ) all_campaigns = [campaign1, campaign2, campaign3, inactive_campaign] - result = list(campaign_evaluator.get_campaign_with_latest_active_iteration_per_target(all_campaigns, ["COVID", "FLU"], "VACCINATIONS")) + result = list( + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + all_campaigns, ["COVID", "FLU"], "VACCINATIONS" + ) + ) assert_that(len(result), is_(2)) - result_dict = {str(name): campaigns for name, campaigns in result} + result_dict = {str(name): campaign for name, campaign in result} + assert_that("COVID" in result_dict) assert_that("FLU" in result_dict) - assert_that(len(result_dict["COVID"]), is_(2)) - assert_that({c.id 
for c in result_dict["COVID"]}, is_({CampaignID("C1"), CampaignID("C2")})) - - assert_that(len(result_dict["FLU"]), is_(1)) - assert_that(result_dict["FLU"][0].id, is_(CampaignID("F1"))) + assert_that(result_dict["COVID"].id, is_(CampaignID("C2"))) + assert_that(result_dict["FLU"].id, is_(CampaignID("F1"))) -def test_campaign_grouping_is_affected_by_order_for_mixed_types(campaign_evaluator): +def test_campaign_grouping_is_not_affected_by_order_for_mixed_types(campaign_evaluator): campaign_v = rule.CampaignConfigFactory.build(target="RSV", type="V") campaign_s = rule.CampaignConfigFactory.build(target="RSV", type="S") - evaluator_s_first = campaign_evaluator + # Order: S then V result_s_first = list( - evaluator_s_first.get_campaign_with_latest_active_iteration_per_target([campaign_s, campaign_v], ["RSV"], "VACCINATIONS") + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + [campaign_s, campaign_v], ["RSV"], "VACCINATIONS" + ) ) - assert_that(result_s_first, is_([])) + # Even if S is first, it is filtered out by 'allowed_types' + assert_that(len(result_s_first), is_(1)) + assert_that(result_s_first[0][1].type, is_("V")) - evaluator_v_first = campaign_evaluator + # Order: V then S result_v_first = list( - evaluator_v_first.get_campaign_with_latest_active_iteration_per_target([campaign_v, campaign_s], ["RSV"], "VACCINATIONS") + campaign_evaluator.get_campaign_with_latest_active_iteration_per_target( + [campaign_v, campaign_s], ["RSV"], "VACCINATIONS" + ) ) assert_that(len(result_v_first), is_(1)) - assert_that(len(result_v_first[0][1]), is_(2)) + assert_that(result_v_first[0][1].type, is_("V")) From 1a342117ea339aeebdf4247b678a3542b7122fdf Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:05:38 +0000 Subject: [PATCH 61/66] ELI-674 - pytest for rules_validation/app.py --- tests/unit/validation/test_app.py | 86 ++++++++++++++++++++++++++++++- 1 file changed, 85 
insertions(+), 1 deletion(-) diff --git a/tests/unit/validation/test_app.py b/tests/unit/validation/test_app.py index 1fb9671a9..8fba4d977 100644 --- a/tests/unit/validation/test_app.py +++ b/tests/unit/validation/test_app.py @@ -1,7 +1,7 @@ import sys from datetime import UTC, datetime, timedelta from io import StringIO -from unittest.mock import Mock, PropertyMock +from unittest.mock import Mock, PropertyMock, patch from pydantic import BaseModel, ValidationError @@ -159,3 +159,87 @@ def test_next_iteration_exists(): assert "Next active Iteration Number:" in output assert "8" in output assert str(today + timedelta(days=5)) in output + + +def test_campaign_expired_and_no_next_iteration(): + """Covers: is_campaign_expired = True, next iteration logic skipped.""" + today = datetime.now(UTC).date() + + result = Mock() + config = result.campaign_config + config.campaign_live = False + config.end_date = today - timedelta(days=1) # Expired + config.iterations = [] + + captured = StringIO() + with patch("sys.stdout", new=captured): + display_current_iteration(result) + + output = captured.getvalue() + assert "NOT LIVE" in output + assert "EXPIRED on" in output + assert "Next active Iteration Number" not in output + + +def test_campaign_to_be_started(): + """Covers: is_campaign_expired = False, campaign_live = False.""" + today = datetime.now(UTC).date() + + result = Mock() + config = result.campaign_config + config.campaign_live = False + config.end_date = today + timedelta(days=10) + config.start_date = today + timedelta(days=2) + config.iterations = [] + + captured = StringIO() + with patch("sys.stdout", new=captured): + display_current_iteration(result) + + output = captured.getvalue() + assert "NOT LIVE" in output + assert "To be STARTED on" in output + + +def test_next_iteration_stop_iteration_exception(): + """ + Covers the 'except StopIteration' block in the Next Iteration section. + This triggers if the generator inside next() raises StopIteration explicitly. 
+ """ + today = datetime.now(UTC).date() + + result = Mock() + config = result.campaign_config + config.campaign_live = False + config.end_date = today + timedelta(days=10) + config.iterations = [Mock(iteration_date=today + timedelta(days=5))] + + captured = StringIO() + with patch("sys.stdout", new=captured), patch("rules_validation_api.app.next", side_effect=StopIteration): + display_current_iteration(result) + + output = captured.getvalue() + assert "No next active iteration could be determined" in output + + +def test_next_iteration_is_none(): + today = datetime.now(UTC).date() + + result = Mock() + config = result.campaign_config + config.campaign_live = False + config.end_date = today + timedelta(days=10) + + past_iteration = Mock() + past_iteration.iteration_date = today - timedelta(days=5) + config.iterations = [past_iteration] + + captured = StringIO() + with patch("sys.stdout", new=captured): + display_current_iteration(result) + + output = captured.getvalue() + + assert "Next active Iteration Number" not in output + assert "Total iterations configured:" in output + assert "1" in output From e191e4aa036dfa4692c875edc2f2ecd5522a4cab Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:13:34 +0000 Subject: [PATCH 62/66] ELI-674 - updated comments for sonar fix --- src/eligibility_signposting_api/model/campaign_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 40b6e505a..8cb9fad23 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -320,7 +320,7 @@ def parse_times(cls, v: str | time) -> time | None: msg = f"Invalid time value: {v_str}. Must be a valid time in HH:MM:SS." 
raise ValueError(msg) from err - # If none matched, raise a format error + # If none matches, raise a format error msg = f"Invalid format: {v_str}. Must be HH:MM:SS." raise ValueError(msg) @@ -417,7 +417,7 @@ def parse_times(cls, v: str | time) -> time | None: msg = f"Invalid time value: {v_str}. Must be a valid time in HH:MM:SS." raise ValueError(msg) from err - # If none matched, raise a format error + # If none matches, raise a format error msg = f"Invalid format: {v_str}. Must be HH:MM:SS." raise ValueError(msg) From b4efff2d4f5e20862644d0c2772fa8bdf727f83e Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:21:22 +0000 Subject: [PATCH 63/66] ELI-674 - sonar suppression --- src/eligibility_signposting_api/model/campaign_config.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 8cb9fad23..9801c9845 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -287,7 +287,7 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @field_validator("iteration_date", mode="before") @classmethod - def parse_dates(cls, v: str | date) -> date: + def parse_dates(cls, v: str | date) -> date: # pragma: no cover if isinstance(v, date): return v @@ -305,7 +305,7 @@ def parse_dates(cls, v: str | date) -> date: @field_validator("iteration_time", mode="before") @classmethod - def parse_times(cls, v: str | time) -> time | None: + def parse_times(cls, v: str | time) -> time | None: # pragma: no cover if not v: return None if isinstance(v, time): @@ -384,7 +384,7 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @field_validator("start_date", "end_date", mode="before") @classmethod - def parse_dates(cls, v: str | date) -> date: + def parse_dates(cls, v: str | date) -> date: # 
pragma: no cover if isinstance(v, date): return v @@ -402,7 +402,7 @@ def parse_dates(cls, v: str | date) -> date: @field_validator("iteration_time", mode="before") @classmethod - def parse_times(cls, v: str | time) -> time | None: + def parse_times(cls, v: str | time) -> time | None: # pragma: no cover if not v: return None if isinstance(v, time): From 62d34764e6f9f2b7b85c3f7f035d15308c3b5ac6 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:26:02 +0000 Subject: [PATCH 64/66] Revert "ELI-674 - sonar suppression" This reverts commit b4efff2d4f5e20862644d0c2772fa8bdf727f83e. --- src/eligibility_signposting_api/model/campaign_config.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 9801c9845..8cb9fad23 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -287,7 +287,7 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @field_validator("iteration_date", mode="before") @classmethod - def parse_dates(cls, v: str | date) -> date: # pragma: no cover + def parse_dates(cls, v: str | date) -> date: if isinstance(v, date): return v @@ -305,7 +305,7 @@ def parse_dates(cls, v: str | date) -> date: # pragma: no cover @field_validator("iteration_time", mode="before") @classmethod - def parse_times(cls, v: str | time) -> time | None: # pragma: no cover + def parse_times(cls, v: str | time) -> time | None: if not v: return None if isinstance(v, time): @@ -384,7 +384,7 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @field_validator("start_date", "end_date", mode="before") @classmethod - def parse_dates(cls, v: str | date) -> date: # pragma: no cover + def parse_dates(cls, v: str | date) -> date: if isinstance(v, date): return v @@ -402,7 +402,7 @@ def 
parse_dates(cls, v: str | date) -> date: # pragma: no cover @field_validator("iteration_time", mode="before") @classmethod - def parse_times(cls, v: str | time) -> time | None: # pragma: no cover + def parse_times(cls, v: str | time) -> time | None: if not v: return None if isinstance(v, time): From 8826fa8cc35d7ade39ec331cd430cf70b25d5692 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:41:23 +0000 Subject: [PATCH 65/66] ELI-674 - linting suppression --- .../model/campaign_config.py | 98 +++++++------------ 1 file changed, 36 insertions(+), 62 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 8cb9fad23..4d31eedfd 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -51,6 +51,38 @@ RuleText = NewType("RuleText", str) +class DateUtil: + @staticmethod + def parse_date_yyyymmdd(v: str | date) -> date: + if isinstance(v, date): + return v + v_str = str(v) + if not re.fullmatch(r"\d{8}", v_str): + msg = f"Invalid format: {v_str}. Must be YYYYMMDD." + raise ValueError(msg) + try: + return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 + except ValueError as err: + msg = f"Invalid date value: {v_str}." + raise ValueError(msg) from err + + @staticmethod + def parse_time_hhmmss(v: str | time | None) -> time | None: + if not v: + return None + if isinstance(v, time): + return v + v_str = str(v).strip() + if re.fullmatch(r"^\d{2}:\d{2}:\d{2}$", v_str): + try: + return datetime.strptime(v_str, "%H:%M:%S").time() # noqa: DTZ007 + except ValueError as err: + msg = f"Invalid time value: {v_str}." + raise ValueError(msg) from err + msg = f"Invalid format: {v_str}. Must be HH:MM:SS." 
+ raise ValueError(msg) + + class RuleType(StrEnum): filter = "F" suppression = "S" @@ -288,41 +320,12 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @field_validator("iteration_date", mode="before") @classmethod def parse_dates(cls, v: str | date) -> date: - if isinstance(v, date): - return v - - v_str = str(v) - - if not re.fullmatch(r"\d{8}", v_str): - msg = f"Invalid format: {v_str}. Must be YYYYMMDD with 8 digits." - raise ValueError(msg) - - try: - return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 - except ValueError as err: - msg = f"Invalid date value: {v_str}. Must be a valid calendar date in YYYYMMDD format." - raise ValueError(msg) from err + return DateUtil.parse_date_yyyymmdd(v) @field_validator("iteration_time", mode="before") @classmethod def parse_times(cls, v: str | time) -> time | None: - if not v: - return None - if isinstance(v, time): - return v - - v_str = str(v).strip() - - if re.fullmatch(r"^\d{2}:\d{2}:\d{2}$", v_str): - try: - return datetime.strptime(v_str, "%H:%M:%S").time() # noqa: DTZ007 - except ValueError as err: - msg = f"Invalid time value: {v_str}. Must be a valid time in HH:MM:SS." - raise ValueError(msg) from err - - # If none matches, raise a format error - msg = f"Invalid format: {v_str}. Must be HH:MM:SS." - raise ValueError(msg) + return DateUtil.parse_time_hhmmss(v) @field_serializer("iteration_date", when_used="always") @staticmethod @@ -385,41 +388,12 @@ def __init__(self, **data: dict[str, typing.Any]) -> None: @field_validator("start_date", "end_date", mode="before") @classmethod def parse_dates(cls, v: str | date) -> date: - if isinstance(v, date): - return v - - v_str = str(v) - - if not re.fullmatch(r"\d{8}", v_str): - msg = f"Invalid format: {v_str}. Must be YYYYMMDD with 8 digits." - raise ValueError(msg) - - try: - return datetime.strptime(v_str, "%Y%m%d").date() # noqa: DTZ007 - except ValueError as err: - msg = f"Invalid date value: {v_str}. 
Must be a valid calendar date in YYYYMMDD format." - raise ValueError(msg) from err + return DateUtil.parse_date_yyyymmdd(v) @field_validator("iteration_time", mode="before") @classmethod def parse_times(cls, v: str | time) -> time | None: - if not v: - return None - if isinstance(v, time): - return v - - v_str = str(v).strip() - - if re.fullmatch(r"^\d{2}:\d{2}:\d{2}$", v_str): - try: - return datetime.strptime(v_str, "%H:%M:%S").time() # noqa: DTZ007 - except ValueError as err: - msg = f"Invalid time value: {v_str}. Must be a valid time in HH:MM:SS." - raise ValueError(msg) from err - - # If none matches, raise a format error - msg = f"Invalid format: {v_str}. Must be HH:MM:SS." - raise ValueError(msg) + return DateUtil.parse_time_hhmmss(v) @field_serializer("start_date", "end_date", when_used="always") @staticmethod From 04ca579e557d15ab4cdf7ad0474ebb390f592b50 Mon Sep 17 00:00:00 2001 From: karthikeyannhs <174426205+Karthikeyannhs@users.noreply.github.com> Date: Tue, 10 Mar 2026 11:21:06 +0000 Subject: [PATCH 66/66] ELI-674 - incorporate review comments - use @before for set_parent methods --- .../model/campaign_config.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/eligibility_signposting_api/model/campaign_config.py b/src/eligibility_signposting_api/model/campaign_config.py index 4d31eedfd..ce71cf188 100644 --- a/src/eligibility_signposting_api/model/campaign_config.py +++ b/src/eligibility_signposting_api/model/campaign_config.py @@ -311,11 +311,11 @@ class Iteration(BaseModel): model_config = {"populate_by_name": True, "arbitrary_types_allowed": True, "extra": "ignore"} - def __init__(self, **data: dict[str, typing.Any]) -> None: - super().__init__(**data) - # Ensure each rule knows its parent iteration - for rule in self.iteration_rules: - rule.set_parent(self) + @model_validator(mode="after") + def _link_parent_to_iteration_rules(self) -> typing.Self: + for iteration in self.iteration_rules: + 
iteration.set_parent(self) + return self @field_validator("iteration_date", mode="before") @classmethod @@ -379,12 +379,13 @@ class CampaignConfig(BaseModel): model_config = {"populate_by_name": True, "arbitrary_types_allowed": True, "extra": "ignore"} - def __init__(self, **data: dict[str, typing.Any]) -> None: - super().__init__(**data) - # Ensure each rule knows its parent iteration + @model_validator(mode="after") + def _link_parent_to_iterations(self) -> typing.Self: for iteration in self.iterations: iteration.set_parent(self) + return self + @field_validator("start_date", "end_date", mode="before") @classmethod def parse_dates(cls, v: str | date) -> date: