From 7a63bfed2e51cc51d71d6608042aff19befc81fe Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Wed, 5 Nov 2025 00:36:38 +0000 Subject: [PATCH 01/16] chore: update HISTORY.md for main --- HISTORY.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index 0f3a9e8..14ba25d 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -6,6 +6,17 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.3.0](https://github.com/cortexapps/cli/releases/tag/1.3.0) - 2025-11-05 + +[Compare with 1.2.0](https://github.com/cortexapps/cli/compare/1.2.0...1.3.0) + +### Fixed + +- fix: add retry logic for scorecard create to handle active evaluations ([cc40b55](https://github.com/cortexapps/cli/commit/cc40b55ed9ef5af4146360b5a879afc6dc67fe06) by Jeff Schnitter). +- fix: use json.dump instead of Rich print for file writing ([c66c2fe](https://github.com/cortexapps/cli/commit/c66c2fe438cc95f8343fbd4ba3cecae605c435ea) by Jeff Schnitter). +- fix: ensure export/import output is in alphabetical order ([9055f78](https://github.com/cortexapps/cli/commit/9055f78cc4e1136da20e4e42883ff3c0f248825b) by Jeff Schnitter). +- fix: ensure CORTEX_BASE_URL is available in publish workflow ([743579d](https://github.com/cortexapps/cli/commit/743579d760e900da693696df2841e7b710b08d39) by Jeff Schnitter). + ## [1.2.0](https://github.com/cortexapps/cli/releases/tag/1.2.0) - 2025-11-04 [Compare with 1.1.0](https://github.com/cortexapps/cli/compare/1.1.0...1.2.0) From 8879fcfa7ee30a73f023e8bbef7d799808493319 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 07:52:34 -0800 Subject: [PATCH 02/16] perf: optimize test scheduling with --dist loadfile for 25% faster test runs (#157) --- Justfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Justfile b/Justfile index 2dca61a..de6a855 100644 --- a/Justfile +++ b/Justfile @@ -13,7 +13,7 @@ _setup: # Run all tests test-all: _setup test-import - {{pytest}} -n auto -m "not setup" --html=report.html --self-contained-html --cov=cortexapps_cli --cov-append --cov-report term-missing tests + {{pytest}} -n auto --dist loadfile -m "not setup" --html=report.html --self-contained-html --cov=cortexapps_cli --cov-append --cov-report term-missing tests # Run all tests serially - helpful to see if any tests seem to be hanging _test-all-individual: test-import From 8c1ba4fcc0d106dacbc595ecc13a95cd6995fd8d Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 10:01:36 -0800 Subject: [PATCH 03/16] refactor: separate trigger-evaluation test to avoid scorecard evaluation race conditions - Create dedicated cli-test-evaluation-scorecard for trigger-evaluation testing - Remove retry logic complexity from test_scorecards() and test_scorecards_drafts() - Add new test_scorecard_trigger_evaluation() that creates/deletes its own scorecard - Eliminates race condition where import triggers evaluation conflicting with tests --- .../cli-test-evaluation-scorecard.yaml | 21 +++++++++ tests/test_scorecards.py | 46 +++++++------------ 2 files changed, 37 insertions(+), 30 deletions(-) create mode 100644 data/import/scorecards/cli-test-evaluation-scorecard.yaml diff --git a/data/import/scorecards/cli-test-evaluation-scorecard.yaml b/data/import/scorecards/cli-test-evaluation-scorecard.yaml new file mode 100644 index 0000000..2524796 --- /dev/null +++ b/data/import/scorecards/cli-test-evaluation-scorecard.yaml @@ -0,0 +1,21 @@ +tag: cli-test-evaluation-scorecard +name: CLI Test Evaluation Scorecard +description: Used to test Cortex CLI trigger-evaluation command +draft: false +ladder: + name: Default Ladder + levels: + - name: You Made It + rank: 1 + description: "My boring description" + color: 7cf376 +rules: +- title: Has Custom Data + expression: custom("testField") != null + weight: 1 + level: You Made It + filter: + category: SERVICE +filter: + query: 'entity.tag() == "cli-test-service"' + category: SERVICE diff --git a/tests/test_scorecards.py b/tests/test_scorecards.py index 801f556..f19ac48 100644 --- a/tests/test_scorecards.py +++ b/tests/test_scorecards.py @@ -11,18 +11,7 @@ def _get_rule(title): return rule_id[0] def test_scorecards(): - # Retry scorecard create in case there's an active evaluation - # (can happen if test_import.py just triggered an evaluation) - max_retries = 3 - for attempt in range(max_retries): - try: - cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"]) - break - except Exception as e: - if "500" in str(e) and attempt < max_retries - 1: - time.sleep(2 ** attempt) # Exponential backoff: 1s, 2s - continue - raise + cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"]) response = cli(["scorecards", "list"]) assert any(scorecard['tag'] == 'cli-test-scorecard' for scorecard in response['scorecards']), "Should find scorecard with tag cli-test-scorecard" @@ -39,33 +28,30 @@ def test_scorecards(): # cannot rely on a scorecard evaluation being complete, so not performing any validation cli(["scorecards", "next-steps", "-s", "cli-test-scorecard", "-t", "cli-test-service"]) - # Test trigger-evaluation command (accepts both success and 409 Already evaluating) - response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT) - assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \ - "Should receive success message or 409 Already evaluating error" - # cannot rely on a scorecard evaluation being complete, so not performing any validation #response = cli(["scorecards", "scores", "-s", "cli-test-scorecard", "-t", "cli-test-service"]) #assert response['scorecardTag'] == "cli-test-scorecard", "Should get valid response that include cli-test-scorecard" - + # # Not sure if we can run this cli right away. Newly-created Scorecard might not be evaluated yet. # # 2024-05-06, additionally now blocked by CET-8882 # # cli(["scorecards", "scores", "-t", "cli-test-scorecard", "-e", "cli-test-service"]) # # cli(["scorecards", "scores", "-t", "cli-test-scorecard"]) - + +def test_scorecard_trigger_evaluation(): + # Create a dedicated scorecard for trigger-evaluation testing to avoid conflicts with import + cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-evaluation-scorecard.yaml"]) + + # Test trigger-evaluation command (accepts both success and 409 Already evaluating) + response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-evaluation-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT) + assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \ + "Should receive success message or 409 Already evaluating error" + + # Clean up + cli(["scorecards", "delete", "-s", "cli-test-evaluation-scorecard"]) + def test_scorecards_drafts(): - # Retry scorecard create in case there's an active evaluation - max_retries = 3 - for attempt in range(max_retries): - try: - cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"]) - break - except Exception as e: - if "500" in str(e) and attempt < max_retries - 1: - time.sleep(2 ** attempt) # Exponential backoff: 1s, 2s - continue - raise + cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"]) response = cli(["scorecards", "list", "-s"]) assert any(scorecard['tag'] == 'cli-test-draft-scorecard' for scorecard in response['scorecards']) From 3e09a81e22ea3aed35ee780c605f108bf176b305 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 11:17:59 -0800 Subject: [PATCH 04/16] refactor: remove unnecessary mock decorator from _get_rule helper function The helper function doesn't need its own environment patching since it's called from fixtures that already have their own @mock.patch.dict decorators. --- tests/test_scorecards.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/test_scorecards.py b/tests/test_scorecards.py index f19ac48..7b5c991 100644 --- a/tests/test_scorecards.py +++ b/tests/test_scorecards.py @@ -4,7 +4,6 @@ # Get rule id to be used in exemption tests. # TODO: check for and revoke any PENDING exemptions. -@mock.patch.dict(os.environ, {"CORTEX_API_KEY": os.environ['CORTEX_API_KEY']}) def _get_rule(title): response = cli(["scorecards", "get", "-s", "cli-test-scorecard"]) rule_id = [rule['identifier'] for rule in response['scorecard']['rules'] if rule['title'] == title] @@ -66,7 +65,10 @@ def test_scorecards_drafts(): # testing assumes no tenanted data, so this condition needs to be created as part of the test # # - there is no public API to force evaluation of a scorecard; can look into possibility of using -# an internal endpoint for this +# an internal endpoint for this +# +# Nov 2025 - there is a public API to force evaluation of a scorecard for an entity, but there is +# not a way to determine when the evaluation completes. # # - could create a scorecard as part of the test and wait for it to complete, but completion time for # evaluating a scorecard is non-deterministic and, as experienced with query API tests, completion @@ -82,6 +84,7 @@ def test_scorecards_drafts(): # So this is how we'll roll for now . . . # - Automated tests currently run in known tenants that have the 'cli-test-scorecard' in an evaluated state. # - So we can semi-reliably count on an evaluated scorecard to exist. +# - However, we should be cleaning up test data after tests run which would invalidate these assumptions. @pytest.fixture(scope='session') @mock.patch.dict(os.environ, {"CORTEX_API_KEY": os.environ['CORTEX_API_KEY_VIEWER']}) From c03fa2280ab86fa6b0945dbff1097a67670d39b3 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 11:25:09 -0800 Subject: [PATCH 05/16] Revert "perf: optimize test scheduling with --dist loadfile for 25% faster test runs (#157)" This reverts commit 8879fcfa7ee30a73f023e8bbef7d799808493319. The --dist loadfile optimization caused race conditions between tests that share resources (e.g., test_custom_events_uuid and test_custom_events_list both operate on custom events and can interfere with each other when run in parallel by file). Reliability > speed. Better to have tests take 40s with no race conditions than 30s with intermittent failures. --- Justfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Justfile b/Justfile index de6a855..2dca61a 100644 --- a/Justfile +++ b/Justfile @@ -13,7 +13,7 @@ _setup: # Run all tests test-all: _setup test-import - {{pytest}} -n auto --dist loadfile -m "not setup" --html=report.html --self-contained-html --cov=cortexapps_cli --cov-append --cov-report term-missing tests + {{pytest}} -n auto -m "not setup" --html=report.html --self-contained-html --cov=cortexapps_cli --cov-append --cov-report term-missing tests # Run all tests serially - helpful to see if any tests seem to be hanging _test-all-individual: test-import From f36aae22f56317cde70a6a9df56b097edb6a6117 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 11:30:02 -0800 Subject: [PATCH 06/16] perf: rename test_deploys.py to test_000_deploys.py for early scheduling Pytest collects tests alphabetically by filename. With pytest-xdist --dist load, tests collected earlier are more likely to be scheduled first. Since test_deploys is the longest-running test (~19s), scheduling it early maximizes parallel efficiency with 12 workers. This is our general strategy: prefix slow tests with numbers (000, 001, etc.) to control scheduling order without introducing race conditions like --dist loadfile. --- tests/{test_deploys.py => test_000_deploys.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/{test_deploys.py => test_000_deploys.py} (100%) diff --git a/tests/test_deploys.py b/tests/test_000_deploys.py similarity index 100% rename from tests/test_deploys.py rename to tests/test_000_deploys.py From ca1d2155b6231236fac0356a5812fe8b3fbb3693 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 15:38:00 -0800 Subject: [PATCH 07/16] feat: add entity relationships API support and fix backup export bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix double-encoding bug in backup export for entity-types and ip-allowlist - entity-types and ip-allowlist were being converted to strings before json.dump - This caused import failures with "TypeError: string indices must be integers" - Add entity-relationship-types commands: - list: List all relationship types - get: Get relationship type by tag - create: Create new relationship type - update: Update existing relationship type - delete: Delete relationship type - Add entity-relationships commands: - list: List all relationships for a type - list-destinations: Get destinations for source entity - list-sources: Get sources for destination entity - add-destinations: Add destinations to source - add-sources: Add sources to destination - update-destinations: Replace all destinations for source - update-sources: Replace all sources for destination - add-bulk: Add multiple relationships - update-bulk: Replace all relationships for type - Integrate entity relationships into backup/restore: - Export entity-relationship-types and entity-relationships - Import with proper ordering (types before catalog, relationships after) - Transform export format to bulk update format for import 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/cli.py | 4 + cortexapps_cli/commands/backup.py | 89 +++++++- .../commands/entity_relationship_types.py | 112 +++++++++ .../commands/entity_relationships.py | 215 ++++++++++++++++++ 4 files changed, 415 insertions(+), 5 deletions(-) create mode 100644 cortexapps_cli/commands/entity_relationship_types.py create mode 100644 cortexapps_cli/commands/entity_relationships.py diff --git a/cortexapps_cli/cli.py b/cortexapps_cli/cli.py index 03d471d..b94c2ea 100755 --- a/cortexapps_cli/cli.py +++ b/cortexapps_cli/cli.py @@ -24,6 +24,8 @@ import cortexapps_cli.commands.discovery_audit as discovery_audit import cortexapps_cli.commands.docs as docs import cortexapps_cli.commands.entity_types as entity_types +import cortexapps_cli.commands.entity_relationship_types as entity_relationship_types +import cortexapps_cli.commands.entity_relationships as entity_relationships import cortexapps_cli.commands.gitops_logs as gitops_logs import cortexapps_cli.commands.groups as groups import cortexapps_cli.commands.initiatives as initiatives @@ -58,6 +60,8 @@ app.add_typer(discovery_audit.app, name="discovery-audit") app.add_typer(docs.app, name="docs") app.add_typer(entity_types.app, name="entity-types") +app.add_typer(entity_relationship_types.app, name="entity-relationship-types") +app.add_typer(entity_relationships.app, name="entity-relationships") app.add_typer(gitops_logs.app, name="gitops-logs") app.add_typer(groups.app, name="groups") app.add_typer(initiatives.app, name="initiatives") diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index a09fdc8..365907c 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -14,6 +14,8 @@ import cortexapps_cli.commands.scorecards as scorecards import cortexapps_cli.commands.catalog as catalog import cortexapps_cli.commands.entity_types as entity_types +import cortexapps_cli.commands.entity_relationship_types as entity_relationship_types +import cortexapps_cli.commands.entity_relationships as entity_relationships import cortexapps_cli.commands.ip_allowlist as ip_allowlist import cortexapps_cli.commands.plugins as plugins import cortexapps_cli.commands.workflows as workflows @@ -93,15 +95,39 @@ def _export_entity_types(ctx, directory): for definition in definitions_sorted: tag = definition['type'] - json_string = json.dumps(definition, indent=4) - _file_name(directory, tag, json_string, "json") + _file_name(directory, tag, definition, "json") def _export_ip_allowlist(ctx, directory): directory = _directory_name(directory, "ip-allowlist") file = directory + "/ip-allowlist.json" content = ip_allowlist.get(ctx, page=None, page_size=None, _print=False) - _file_name(directory, "ip-allowlist", str(content), "json") + _file_name(directory, "ip-allowlist", content, "json") + +def _export_entity_relationship_types(ctx, directory): + directory = _directory_name(directory, "entity-relationship-types") + + data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False) + relationship_types_sorted = sorted(data['relationshipTypes'], key=lambda x: x["tag"]) + + for rel_type in relationship_types_sorted: + tag = rel_type['tag'] + _file_name(directory, tag, rel_type, "json") + +def _export_entity_relationships(ctx, directory): + directory = _directory_name(directory, "entity-relationships") + + # First get all relationship types + rel_types_data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False) + rel_types = [rt['tag'] for rt in rel_types_data['relationshipTypes']] + + # For each relationship type, export all relationships + for rel_type in sorted(rel_types): + data = entity_relationships.list(ctx, relationship_type=rel_type, page=None, page_size=250, _print=False) + relationships = data.get('relationships', []) + + if relationships: + _file_name(directory, rel_type, relationships, "json") def _export_plugins(ctx, directory): directory = _directory_name(directory, "plugins") @@ -179,6 +205,8 @@ def _export_workflows(ctx, directory): backupTypes = { "catalog", "entity-types", + "entity-relationship-types", + "entity-relationships", "ip-allowlist", "plugins", "scorecards", @@ -226,6 +254,8 @@ def export( Exports the following objects: - catalog - entity-types + - entity-relationship-types + - entity-relationships - ip-allowlist - plugins - scorecards @@ -240,14 +270,13 @@ def export( cortex backup export --export-types catalog --catalog-types AWS::S3::Bucket It does not back up everything in the tenant. For example these objects are not backed up: - - api-keys + - api-keys - custom-events - custom-metadata created by the public API - custom-metrics - dependencies created by the API - deploys - docs created by the API - - entity-relationships created by the API - groups added by the API - packages - secrets @@ -265,6 +294,10 @@ def export( _export_catalog(ctx, directory, catalog_types) if "entity-types" in export_types: _export_entity_types(ctx, directory) + if "entity-relationship-types" in export_types: + _export_entity_relationship_types(ctx, directory) + if "entity-relationships" in export_types: + _export_entity_relationships(ctx, directory) if "ip-allowlist" in export_types: _export_ip_allowlist(ctx, directory) if "plugins" in export_types: @@ -295,6 +328,50 @@ def _import_entity_types(ctx, force, directory): print(" Importing: " + filename) entity_types.create(ctx, file_input=open(file_path), force=force) +def _import_entity_relationship_types(ctx, directory): + if os.path.isdir(directory): + print("Processing: " + directory) + for filename in sorted(os.listdir(directory)): + file_path = os.path.join(directory, filename) + if os.path.isfile(file_path): + print(" Importing: " + filename) + entity_relationship_types.create(ctx, file_input=open(file_path)) + +def _import_entity_relationships(ctx, directory): + if os.path.isdir(directory): + print("Processing: " + directory) + for filename in sorted(os.listdir(directory)): + file_path = os.path.join(directory, filename) + if os.path.isfile(file_path): + # Extract relationship type from filename (without .json extension) + rel_type = filename.replace('.json', '') + print(f" Importing relationships for: {rel_type}") + + # Read the relationships file + with open(file_path) as f: + relationships = json.load(f) + + # Convert list format to the format expected by update-bulk + # The export saves the raw relationships list, but update-bulk needs {"relationships": [...]} + if isinstance(relationships, list): + data = {"relationships": []} + for rel in relationships: + # Extract source and destination tags + data["relationships"].append({ + "source": rel.get("source", {}).get("tag"), + "destination": rel.get("destination", {}).get("tag") + }) + + # Use update-bulk to replace all relationships for this type + temp_file = typer.unstable.TempFile(mode='w', suffix='.json', delete=False) + json.dump(data, temp_file) + temp_file.close() + + try: + entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file.name), force=True) + finally: + os.unlink(temp_file.name) + def _import_catalog(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) @@ -429,7 +506,9 @@ def import_tenant( _import_ip_allowlist(ctx, directory + "/ip-allowlist") _import_entity_types(ctx, force, directory + "/entity-types") + _import_entity_relationship_types(ctx, directory + "/entity-relationship-types") _import_catalog(ctx, directory + "/catalog") + _import_entity_relationships(ctx, directory + "/entity-relationships") _import_plugins(ctx, directory + "/plugins") _import_scorecards(ctx, directory + "/scorecards") _import_workflows(ctx, directory + "/workflows") diff --git a/cortexapps_cli/commands/entity_relationship_types.py b/cortexapps_cli/commands/entity_relationship_types.py new file mode 100644 index 0000000..613375e --- /dev/null +++ b/cortexapps_cli/commands/entity_relationship_types.py @@ -0,0 +1,112 @@ +import typer +import json +from typing_extensions import Annotated +from cortexapps_cli.utils import print_output_with_context +from cortexapps_cli.command_options import ListCommandOptions + +app = typer.Typer( + help="Entity Relationship Types commands", + no_args_is_help=True +) + +@app.command() +def list( + ctx: typer.Context, + page: ListCommandOptions.page = None, + page_size: ListCommandOptions.page_size = 250, + table_output: ListCommandOptions.table_output = False, + csv_output: ListCommandOptions.csv_output = False, + columns: ListCommandOptions.columns = [], + no_headers: ListCommandOptions.no_headers = False, + filters: ListCommandOptions.filters = [], + sort: ListCommandOptions.sort = [], +): + """ + List entity relationship types + """ + client = ctx.obj["client"] + + params = { + "page": page, + "pageSize": page_size + } + + if (table_output or csv_output) and not ctx.params.get('columns'): + ctx.params['columns'] = [ + "Tag=tag", + "Name=name", + "Description=description", + ] + + # remove any params that are None + params = {k: v for k, v in params.items() if v is not None} + + if page is None: + r = client.fetch("api/v1/relationship-types", params=params) + else: + r = client.get("api/v1/relationship-types", params=params) + print_output_with_context(ctx, r) + +@app.command() +def get( + ctx: typer.Context, + tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"), +): + """ + Get a relationship type by tag + """ + client = ctx.obj["client"] + r = client.get(f"api/v1/relationship-types/{tag}") + print_output_with_context(ctx, r) + +@app.command() +def create( + ctx: typer.Context, + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationship type definition; can be passed as stdin with -, example: -f-")] = ..., +): + """ + Create a relationship type + + Provide a JSON file with the relationship type definition including required fields: + - tag: unique identifier + - name: human-readable name + - definitionLocation: SOURCE, DESTINATION, or BOTH + - allowCycles: boolean + - createCatalog: boolean + - isSingleSource: boolean + - isSingleDestination: boolean + - sourcesFilter: object with include/types configuration + - destinationsFilter: object with include/types configuration + - inheritances: array of inheritance settings + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + r = client.post("api/v1/relationship-types", data=data) + print_output_with_context(ctx, r) + +@app.command() +def update( + ctx: typer.Context, + tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationship type definition; can be passed as stdin with -, example: -f-")] = ..., +): + """ + Update a relationship type + + Provide a JSON file with the relationship type definition to update. + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + r = client.put(f"api/v1/relationship-types/{tag}", data=data) + print_output_with_context(ctx, r) + +@app.command() +def delete( + ctx: typer.Context, + tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"), +): + """ + Delete a relationship type + """ + client = ctx.obj["client"] + client.delete(f"api/v1/relationship-types/{tag}") diff --git a/cortexapps_cli/commands/entity_relationships.py b/cortexapps_cli/commands/entity_relationships.py new file mode 100644 index 0000000..483e98b --- /dev/null +++ b/cortexapps_cli/commands/entity_relationships.py @@ -0,0 +1,215 @@ +import typer +import json +from typing_extensions import Annotated +from cortexapps_cli.utils import print_output_with_context +from cortexapps_cli.command_options import ListCommandOptions + +app = typer.Typer( + help="Entity Relationships commands (Beta)", + no_args_is_help=True +) + +@app.command() +def list( + ctx: typer.Context, + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + page: ListCommandOptions.page = None, + page_size: ListCommandOptions.page_size = 250, + table_output: ListCommandOptions.table_output = False, + csv_output: ListCommandOptions.csv_output = False, + columns: ListCommandOptions.columns = [], + no_headers: ListCommandOptions.no_headers = False, + filters: ListCommandOptions.filters = [], + sort: ListCommandOptions.sort = [], +): + """ + List all relationships for a given relationship type + """ + client = ctx.obj["client"] + + params = { + "page": page, + "pageSize": page_size + } + + if (table_output or csv_output) and not ctx.params.get('columns'): + ctx.params['columns'] = [ + "Source=source.tag", + "Destination=destination.tag", + "Provider=providerType", + ] + + # remove any params that are None + params = {k: v for k, v in params.items() if v is not None} + + if page is None: + r = client.fetch(f"api/v1/relationships/{relationship_type}", params=params) + else: + r = client.get(f"api/v1/relationships/{relationship_type}", params=params) + print_output_with_context(ctx, r) + +@app.command() +def list_destinations( + ctx: typer.Context, + entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"), + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + depth: int = typer.Option(1, "--depth", "-d", help="Maximum hierarchy depth"), + include_archived: bool = typer.Option(False, "--include-archived", help="Include archived entities"), +): + """ + List destination entities for a given source entity and relationship type + """ + client = ctx.obj["client"] + + params = { + "depth": depth, + "includeArchived": include_archived + } + + r = client.get(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/destinations", params=params) + print_output_with_context(ctx, r) + +@app.command() +def list_sources( + ctx: typer.Context, + entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"), + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + depth: int = typer.Option(1, "--depth", "-d", help="Maximum hierarchy depth"), + include_archived: bool = typer.Option(False, "--include-archived", help="Include archived entities"), +): + """ + List source entities for a given destination entity and relationship type + """ + client = ctx.obj["client"] + + params = { + "depth": depth, + "includeArchived": include_archived + } + + r = client.get(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/sources", params=params) + print_output_with_context(ctx, r) + +@app.command() +def add_destinations( + ctx: typer.Context, + entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"), + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing destinations array; can be passed as stdin with -, example: -f-")] = ..., + force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), +): + """ + Add destination entities for a given source entity + + Provide a JSON file with: {"destinations": ["entity-1", "entity-2"]} + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + + params = {"force": force} if force else {} + + r = client.post(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/destinations", data=data, params=params) + print_output_with_context(ctx, r) + +@app.command() +def add_sources( + ctx: typer.Context, + entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"), + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing sources array; can be passed as stdin with -, example: -f-")] = ..., + force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), +): + """ + Add source entities for a given destination entity + + Provide a JSON file with: {"sources": ["entity-1", "entity-2"]} + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + + params = {"force": force} if force else {} + + r = client.post(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/sources", data=data, params=params) + print_output_with_context(ctx, r) + +@app.command() +def update_destinations( + ctx: typer.Context, + entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"), + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing destinations array; can be passed as stdin with -, example: -f-")] = ..., + force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), +): + """ + Replace all destination entities for a given source entity + + Provide a JSON file with: {"destinations": ["entity-1", "entity-2"]} + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + + params = {"force": force} if force else {} + + r = client.put(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/destinations", data=data, params=params) + print_output_with_context(ctx, r) + +@app.command() +def update_sources( + ctx: typer.Context, + entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"), + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing sources array; can be passed as stdin with -, example: -f-")] = ..., + force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), +): + """ + Replace all source entities for a given destination entity + + Provide a JSON file with: {"sources": ["entity-1", "entity-2"]} + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + + params = {"force": force} if force else {} + + r = client.put(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/sources", data=data, params=params) + print_output_with_context(ctx, r) + +@app.command() +def add_bulk( + ctx: typer.Context, + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationships array; can be passed as stdin with -, example: -f-")] = ..., + force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), +): + """ + Add multiple relationships in bulk + + Provide a JSON file with: {"relationships": [{"source": "tag1", "destination": "tag2"}]} + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + + params = {"force": force} if force else {} + + r = client.post(f"api/v1/relationships/{relationship_type}", data=data, params=params) + print_output_with_context(ctx, r) + +@app.command() +def update_bulk( + ctx: typer.Context, + relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationships array; can be passed as stdin with -, example: -f-")] = ..., + force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), +): + """ + Replace all relationships for a given relationship type + + Provide a JSON file with: {"relationships": [{"source": "tag1", "destination": "tag2"}]} + """ + client = ctx.obj["client"] + data = json.loads("".join([line for line in file_input])) + + params = {"force": force} if force else {} + + r = client.put(f"api/v1/relationships/{relationship_type}", data=data, params=params) + print_output_with_context(ctx, r) From 426b14258b61f359cdbfc9cdff7d35373b3c5d0f Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 15:47:36 -0800 Subject: [PATCH 08/16] fix: clean up entity relationships import output and fix bugs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add _print parameter to entity_relationship_types.create() and entity_relationships.update_bulk() - Use _print=False when importing to suppress JSON output - Fix import to use correct keys: sourceEntity.tag and destinationEntity.tag instead of source.tag - Replace typer.unstable.TempFile with standard tempfile.NamedTemporaryFile - Improve output: show only tag names instead of full JSON when importing - Add missing tempfile import 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 28 +++++++++++-------- .../commands/entity_relationship_types.py | 13 +++++++-- .../commands/entity_relationships.py | 13 +++++++-- 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index 365907c..ed001ec 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -5,6 +5,7 @@ import typer import json import os +import tempfile from rich import print, print_json from rich.console import Console from enum import Enum @@ -334,8 +335,10 @@ def _import_entity_relationship_types(ctx, directory): for filename in sorted(os.listdir(directory)): file_path = os.path.join(directory, filename) if os.path.isfile(file_path): - print(" Importing: " + filename) - entity_relationship_types.create(ctx, file_input=open(file_path)) + # Extract the tag from filename for cleaner output + tag = filename.replace('.json', '') + print(f" Importing: {tag}") + entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) def _import_entity_relationships(ctx, directory): if os.path.isdir(directory): @@ -345,7 +348,7 @@ def _import_entity_relationships(ctx, directory): if os.path.isfile(file_path): # Extract relationship type from filename (without .json extension) rel_type = filename.replace('.json', '') - print(f" Importing relationships for: {rel_type}") + print(f" Importing: {rel_type}") # Read the relationships file with open(file_path) as f: @@ -356,21 +359,24 @@ def _import_entity_relationships(ctx, directory): if isinstance(relationships, list): data = {"relationships": []} for rel in relationships: - # Extract source and destination tags + # Extract source and destination tags from sourceEntity and destinationEntity + source_tag = rel.get("sourceEntity", {}).get("tag") + dest_tag = rel.get("destinationEntity", {}).get("tag") data["relationships"].append({ - "source": rel.get("source", {}).get("tag"), - "destination": rel.get("destination", {}).get("tag") + "source": source_tag, + "destination": dest_tag }) # Use update-bulk to replace all relationships for this type - temp_file = typer.unstable.TempFile(mode='w', suffix='.json', delete=False) - json.dump(data, temp_file) - temp_file.close() + # Create a temporary file to pass the data + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file: + json.dump(data, temp_file) + temp_file_name = temp_file.name try: - entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file.name), force=True) + entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file_name), force=True, _print=False) finally: - os.unlink(temp_file.name) + os.unlink(temp_file_name) def _import_catalog(ctx, directory): if os.path.isdir(directory): diff --git a/cortexapps_cli/commands/entity_relationship_types.py b/cortexapps_cli/commands/entity_relationship_types.py index 613375e..b1aa6f4 100644 --- a/cortexapps_cli/commands/entity_relationship_types.py +++ b/cortexapps_cli/commands/entity_relationship_types.py @@ -2,7 +2,7 @@ import json from typing_extensions import Annotated from cortexapps_cli.utils import print_output_with_context -from cortexapps_cli.command_options import ListCommandOptions +from cortexapps_cli.command_options import CommandOptions, ListCommandOptions app = typer.Typer( help="Entity Relationship Types commands", @@ -12,6 +12,7 @@ @app.command() def list( ctx: typer.Context, + _print: CommandOptions._print = True, page: ListCommandOptions.page = None, page_size: ListCommandOptions.page_size = 250, table_output: ListCommandOptions.table_output = False, @@ -45,7 +46,11 @@ def list( r = client.fetch("api/v1/relationship-types", params=params) else: r = client.get("api/v1/relationship-types", params=params) - print_output_with_context(ctx, r) + + if _print: + print_output_with_context(ctx, r) + else: + return r @app.command() def get( @@ -63,6 +68,7 @@ def get( def create( ctx: typer.Context, file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationship type definition; can be passed as stdin with -, example: -f-")] = ..., + _print: CommandOptions._print = True, ): """ Create a relationship type @@ -82,7 +88,8 @@ def create( client = ctx.obj["client"] data = json.loads("".join([line for line in file_input])) r = client.post("api/v1/relationship-types", data=data) - print_output_with_context(ctx, r) + if _print: + print_output_with_context(ctx, r) @app.command() def update( diff --git a/cortexapps_cli/commands/entity_relationships.py b/cortexapps_cli/commands/entity_relationships.py index 483e98b..e6ebcba 100644 --- a/cortexapps_cli/commands/entity_relationships.py +++ b/cortexapps_cli/commands/entity_relationships.py @@ -2,7 +2,7 @@ import json from typing_extensions import Annotated from cortexapps_cli.utils import print_output_with_context -from cortexapps_cli.command_options import ListCommandOptions +from cortexapps_cli.command_options import CommandOptions, ListCommandOptions app = typer.Typer( help="Entity Relationships commands (Beta)", @@ -13,6 +13,7 @@ def list( ctx: typer.Context, relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), + _print: CommandOptions._print = True, page: ListCommandOptions.page = None, page_size: ListCommandOptions.page_size = 250, table_output: ListCommandOptions.table_output = False, @@ -46,7 +47,11 @@ def list( r = client.fetch(f"api/v1/relationships/{relationship_type}", params=params) else: r = client.get(f"api/v1/relationships/{relationship_type}", params=params) - print_output_with_context(ctx, r) + + if _print: + print_output_with_context(ctx, r) + else: + return r @app.command() def list_destinations( @@ -200,6 +205,7 @@ def update_bulk( relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"), file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationships array; can be passed as stdin with -, example: -f-")] = ..., force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"), + _print: CommandOptions._print = True, ): """ Replace all relationships for a given relationship type @@ -212,4 +218,5 @@ def update_bulk( params = {"force": force} if force else {} r = client.put(f"api/v1/relationships/{relationship_type}", data=data, params=params) - print_output_with_context(ctx, r) + if _print: + print_output_with_context(ctx, r) From ce0977783bc955374beb72054b7473d09a4e27a5 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 15:49:31 -0800 Subject: [PATCH 09/16] fix: support re-importing existing entity relationship types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Check if relationship type already exists before importing - Use update instead of create for existing relationship types - Add _print parameter to entity_relationship_types.update() - Matches pattern used by entity_types import This allows backup imports to be idempotent and run multiple times without encountering "already exists" errors. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 14 +++++++++++++- .../commands/entity_relationship_types.py | 4 +++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index ed001ec..1ecb16f 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -332,13 +332,25 @@ def _import_entity_types(ctx, force, directory): def _import_entity_relationship_types(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) + + # Get list of existing relationship types + existing_rel_types_data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False) + existing_tags = {rt['tag'] for rt in existing_rel_types_data.get('relationshipTypes', [])} + for filename in sorted(os.listdir(directory)): file_path = os.path.join(directory, filename) if os.path.isfile(file_path): # Extract the tag from filename for cleaner output tag = filename.replace('.json', '') print(f" Importing: {tag}") - entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) + + # Check if relationship type already exists + if tag in existing_tags: + # Update existing relationship type + entity_relationship_types.update(ctx, tag=tag, file_input=open(file_path), _print=False) + else: + # Create new relationship type + entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) def _import_entity_relationships(ctx, directory): if os.path.isdir(directory): diff --git a/cortexapps_cli/commands/entity_relationship_types.py b/cortexapps_cli/commands/entity_relationship_types.py index b1aa6f4..692256a 100644 --- a/cortexapps_cli/commands/entity_relationship_types.py +++ b/cortexapps_cli/commands/entity_relationship_types.py @@ -96,6 +96,7 @@ def update( ctx: typer.Context, tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"), file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationship type definition; can be passed as stdin with -, example: -f-")] = ..., + _print: CommandOptions._print = True, ): """ Update a relationship type @@ -105,7 +106,8 @@ def update( client = ctx.obj["client"] data = json.loads("".join([line for line in file_input])) r = client.put(f"api/v1/relationship-types/{tag}", data=data) - print_output_with_context(ctx, r) + if _print: + print_output_with_context(ctx, r) @app.command() def delete( From 5256f6814a4600ddc90ade8eb4dd8b4b1fe0d696 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 15:51:42 -0800 Subject: [PATCH 10/16] feat: improve error handling in backup import MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add detailed error reporting for catalog imports - Show filename, error type, and error message for failures - Add total failure count at end of catalog import - Add error handling for entity relationship type imports - Wrap create/update in try/except blocks - Show which relationship type failed and why - Add total failure count - Add error handling for entity relationship imports - Wrap import operations in try/except blocks - Show which relationship type failed and why - Add total failure count This makes it much easier to diagnose import failures by showing exactly which files are failing and what the error is. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 107 ++++++++++++++++++------------ 1 file changed, 66 insertions(+), 41 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index 1ecb16f..07625c1 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -337,58 +337,75 @@ def _import_entity_relationship_types(ctx, directory): existing_rel_types_data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False) existing_tags = {rt['tag'] for rt in existing_rel_types_data.get('relationshipTypes', [])} + failed_count = 0 for filename in sorted(os.listdir(directory)): file_path = os.path.join(directory, filename) if os.path.isfile(file_path): # Extract the tag from filename for cleaner output tag = filename.replace('.json', '') - print(f" Importing: {tag}") - # Check if relationship type already exists - if tag in existing_tags: - # Update existing relationship type - entity_relationship_types.update(ctx, tag=tag, file_input=open(file_path), _print=False) - else: - # Create new relationship type - entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) + try: + # Check if relationship type already exists + if tag in existing_tags: + # Update existing relationship type + entity_relationship_types.update(ctx, tag=tag, file_input=open(file_path), _print=False) + else: + # Create new relationship type + entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) + print(f" Importing: {tag}") + except Exception as e: + print(f" Failed to import {tag}: {type(e).__name__} - {str(e)}") + failed_count += 1 + + if failed_count > 0: + print(f"\n Total entity relationship type import failures: {failed_count}") def _import_entity_relationships(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) + failed_count = 0 for filename in sorted(os.listdir(directory)): file_path = os.path.join(directory, filename) if os.path.isfile(file_path): # Extract relationship type from filename (without .json extension) rel_type = filename.replace('.json', '') - print(f" Importing: {rel_type}") - # Read the relationships file - with open(file_path) as f: - relationships = json.load(f) - - # Convert list format to the format expected by update-bulk - # The export saves the raw relationships list, but update-bulk needs {"relationships": [...]} - if isinstance(relationships, list): - data = {"relationships": []} - for rel in relationships: - # Extract source and destination tags from sourceEntity and destinationEntity - source_tag = rel.get("sourceEntity", {}).get("tag") - dest_tag = rel.get("destinationEntity", {}).get("tag") - data["relationships"].append({ - "source": source_tag, - "destination": dest_tag - }) - - # Use update-bulk to replace all relationships for this type - # Create a temporary file to pass the data - with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file: - json.dump(data, temp_file) - temp_file_name = temp_file.name - - try: - entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file_name), force=True, _print=False) - finally: - os.unlink(temp_file_name) + try: + # Read the relationships file + with open(file_path) as f: + relationships = json.load(f) + + # Convert list format to the format expected by update-bulk + # The export saves the raw relationships list, but update-bulk needs {"relationships": [...]} + if isinstance(relationships, list): + data = {"relationships": []} + for rel in relationships: + # Extract source and destination tags from sourceEntity and destinationEntity + source_tag = rel.get("sourceEntity", {}).get("tag") + dest_tag = rel.get("destinationEntity", {}).get("tag") + data["relationships"].append({ + "source": source_tag, + "destination": dest_tag + }) + + # Use update-bulk to replace all relationships for this type + # Create a temporary file to pass the data + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file: + json.dump(data, temp_file) + temp_file_name = temp_file.name + + try: + entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file_name), force=True, _print=False) + finally: + os.unlink(temp_file_name) + + print(f" Importing: {rel_type}") + except Exception as e: + print(f" Failed to import {rel_type}: {type(e).__name__} - {str(e)}") + failed_count += 1 + + if failed_count > 0: + print(f"\n Total entity relationship import failures: {failed_count}") def _import_catalog(ctx, directory): if os.path.isdir(directory): @@ -402,9 +419,12 @@ def import_catalog_file(file_info): try: with open(file_path) as f: catalog.create(ctx, file_input=f, _print=False) - return (filename, None) + return (filename, None, None) except Exception as e: - return (filename, str(e)) + # Capture both the error message and type + error_msg = str(e) + error_type = type(e).__name__ + return (filename, error_type, error_msg) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -414,12 +434,17 @@ def import_catalog_file(file_info): results.append(future.result()) # Print results in alphabetical order - for filename, error in sorted(results, key=lambda x: x[0]): - if error: - print(f" Failed to import {filename}: {error}") + failed_count = 0 + for filename, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {filename}: {error_type} - {error_msg}") + failed_count += 1 else: print(f" Importing: {filename}") + if failed_count > 0: + print(f"\n Total catalog import failures: {failed_count}") + def _import_plugins(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) From 2afaf8c9f4e3deefc62ac289ea2b594d7b266f80 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 15:54:47 -0800 Subject: [PATCH 11/16] fix: improve catalog import error handling and make sequential MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Change catalog import from parallel to sequential execution - This allows errors to be correlated with specific files - HTTP errors from cortex_client are now shown with filenames - Catch typer.Exit exceptions in catalog import - The HTTP client raises typer.Exit on errors - Now catches and reports which file caused the error - Remove unused imports added for parallel error capture - Simplify catalog import logic Note: The plugin import failures with "string indices must be integers" are due to exports created before the double-encoding bug fix. Re-export with the current code to fix these. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 38 +++++++++++-------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index 07625c1..14e2a60 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -6,6 +6,9 @@ import json import os import tempfile +import sys +from io import StringIO +from contextlib import redirect_stdout, redirect_stderr from rich import print, print_json from rich.console import Console from enum import Enum @@ -410,37 +413,22 @@ def _import_entity_relationships(ctx, directory): def _import_catalog(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) - files = [(filename, os.path.join(directory, filename)) - for filename in sorted(os.listdir(directory)) - if os.path.isfile(os.path.join(directory, filename))] + files = sorted([filename for filename in os.listdir(directory) + if os.path.isfile(os.path.join(directory, filename))]) - def import_catalog_file(file_info): - filename, file_path = file_info + failed_count = 0 + for filename in files: + file_path = os.path.join(directory, filename) try: with open(file_path) as f: catalog.create(ctx, file_input=f, _print=False) - return (filename, None, None) + print(f" Importing: {filename}") + except typer.Exit as e: + print(f" Failed to import {filename}: HTTP error (see above)") + failed_count += 1 except Exception as e: - # Capture both the error message and type - error_msg = str(e) - error_type = type(e).__name__ - return (filename, error_type, error_msg) - - # Import all files in parallel - with ThreadPoolExecutor(max_workers=30) as executor: - futures = {executor.submit(import_catalog_file, file_info): file_info[0] for file_info in files} - results = [] - for future in as_completed(futures): - results.append(future.result()) - - # Print results in alphabetical order - failed_count = 0 - for filename, error_type, error_msg in sorted(results, key=lambda x: x[0]): - if error_type: - print(f" Failed to import {filename}: {error_type} - {error_msg}") + print(f" Failed to import {filename}: {type(e).__name__} - {str(e)}") failed_count += 1 - else: - print(f" Importing: {filename}") if failed_count > 0: print(f"\n Total catalog import failures: {failed_count}") From 55a5453f958d628fc9309a97014bd82a0a766473 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 16:08:17 -0800 Subject: [PATCH 12/16] perf: parallelize entity relationships and catalog imports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Restore parallel execution for catalog import (30 workers) - Previously made sequential for error debugging - Now handles typer.Exit exceptions properly - Maintains good error reporting with filenames - Parallelize entity relationship type imports (30 workers) - Check existing types once, then import in parallel - Properly handles create vs update decision - Parallelize entity relationship imports (30 workers) - Each relationship type is independent - Can safely import in parallel All imports now use ThreadPoolExecutor with 30 workers for maximum performance while maintaining error reporting. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 182 +++++++++++++++++++----------- 1 file changed, 116 insertions(+), 66 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index 14e2a60..061ddc9 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -340,25 +340,42 @@ def _import_entity_relationship_types(ctx, directory): existing_rel_types_data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False) existing_tags = {rt['tag'] for rt in existing_rel_types_data.get('relationshipTypes', [])} + files = [(filename, os.path.join(directory, filename)) + for filename in sorted(os.listdir(directory)) + if os.path.isfile(os.path.join(directory, filename))] + + def import_rel_type_file(file_info): + filename, file_path = file_info + tag = filename.replace('.json', '') + try: + # Check if relationship type already exists + if tag in existing_tags: + # Update existing relationship type + entity_relationship_types.update(ctx, tag=tag, file_input=open(file_path), _print=False) + else: + # Create new relationship type + entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) + return (tag, None, None) + except typer.Exit as e: + return (tag, "HTTP", "Validation or HTTP error") + except Exception as e: + return (tag, type(e).__name__, str(e)) + + # Import all files in parallel + with ThreadPoolExecutor(max_workers=30) as executor: + futures = {executor.submit(import_rel_type_file, file_info): file_info[0] for file_info in files} + results = [] + for future in as_completed(futures): + results.append(future.result()) + + # Print results in alphabetical order failed_count = 0 - for filename in sorted(os.listdir(directory)): - file_path = os.path.join(directory, filename) - if os.path.isfile(file_path): - # Extract the tag from filename for cleaner output - tag = filename.replace('.json', '') - - try: - # Check if relationship type already exists - if tag in existing_tags: - # Update existing relationship type - entity_relationship_types.update(ctx, tag=tag, file_input=open(file_path), _print=False) - else: - # Create new relationship type - entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) - print(f" Importing: {tag}") - except Exception as e: - print(f" Failed to import {tag}: {type(e).__name__} - {str(e)}") - failed_count += 1 + for tag, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {tag}: {error_type} - {error_msg}") + failed_count += 1 + else: + print(f" Importing: {tag}") if failed_count > 0: print(f"\n Total entity relationship type import failures: {failed_count}") @@ -366,46 +383,64 @@ def _import_entity_relationship_types(ctx, directory): def _import_entity_relationships(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) + + files = [(filename, os.path.join(directory, filename)) + for filename in sorted(os.listdir(directory)) + if os.path.isfile(os.path.join(directory, filename))] + + def import_relationships_file(file_info): + filename, file_path = file_info + rel_type = filename.replace('.json', '') + try: + # Read the relationships file + with open(file_path) as f: + relationships = json.load(f) + + # Convert list format to the format expected by update-bulk + # The export saves the raw relationships list, but update-bulk needs {"relationships": [...]} + if isinstance(relationships, list): + data = {"relationships": []} + for rel in relationships: + # Extract source and destination tags from sourceEntity and destinationEntity + source_tag = rel.get("sourceEntity", {}).get("tag") + dest_tag = rel.get("destinationEntity", {}).get("tag") + data["relationships"].append({ + "source": source_tag, + "destination": dest_tag + }) + + # Use update-bulk to replace all relationships for this type + # Create a temporary file to pass the data + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file: + json.dump(data, temp_file) + temp_file_name = temp_file.name + + try: + entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file_name), force=True, _print=False) + finally: + os.unlink(temp_file_name) + + return (rel_type, None, None) + except typer.Exit as e: + return (rel_type, "HTTP", "Validation or HTTP error") + except Exception as e: + return (rel_type, type(e).__name__, str(e)) + + # Import all files in parallel + with ThreadPoolExecutor(max_workers=30) as executor: + futures = {executor.submit(import_relationships_file, file_info): file_info[0] for file_info in files} + results = [] + for future in as_completed(futures): + results.append(future.result()) + + # Print results in alphabetical order failed_count = 0 - for filename in sorted(os.listdir(directory)): - file_path = os.path.join(directory, filename) - if os.path.isfile(file_path): - # Extract relationship type from filename (without .json extension) - rel_type = filename.replace('.json', '') - - try: - # Read the relationships file - with open(file_path) as f: - relationships = json.load(f) - - # Convert list format to the format expected by update-bulk - # The export saves the raw relationships list, but update-bulk needs {"relationships": [...]} - if isinstance(relationships, list): - data = {"relationships": []} - for rel in relationships: - # Extract source and destination tags from sourceEntity and destinationEntity - source_tag = rel.get("sourceEntity", {}).get("tag") - dest_tag = rel.get("destinationEntity", {}).get("tag") - data["relationships"].append({ - "source": source_tag, - "destination": dest_tag - }) - - # Use update-bulk to replace all relationships for this type - # Create a temporary file to pass the data - with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file: - json.dump(data, temp_file) - temp_file_name = temp_file.name - - try: - entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=open(temp_file_name), force=True, _print=False) - finally: - os.unlink(temp_file_name) - - print(f" Importing: {rel_type}") - except Exception as e: - print(f" Failed to import {rel_type}: {type(e).__name__} - {str(e)}") - failed_count += 1 + for rel_type, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {rel_type}: {error_type} - {error_msg}") + failed_count += 1 + else: + print(f" Importing: {rel_type}") if failed_count > 0: print(f"\n Total entity relationship import failures: {failed_count}") @@ -413,22 +448,37 @@ def _import_entity_relationships(ctx, directory): def _import_catalog(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) - files = sorted([filename for filename in os.listdir(directory) - if os.path.isfile(os.path.join(directory, filename))]) + files = [(filename, os.path.join(directory, filename)) + for filename in sorted(os.listdir(directory)) + if os.path.isfile(os.path.join(directory, filename))] - failed_count = 0 - for filename in files: - file_path = os.path.join(directory, filename) + def import_catalog_file(file_info): + filename, file_path = file_info try: with open(file_path) as f: catalog.create(ctx, file_input=f, _print=False) - print(f" Importing: {filename}") + return (filename, None, None) except typer.Exit as e: - print(f" Failed to import {filename}: HTTP error (see above)") - failed_count += 1 + # typer.Exit is raised by the HTTP client on errors + return (filename, "HTTP", "Validation or HTTP error") except Exception as e: - print(f" Failed to import {filename}: {type(e).__name__} - {str(e)}") + return (filename, type(e).__name__, str(e)) + + # Import all files in parallel + with ThreadPoolExecutor(max_workers=30) as executor: + futures = {executor.submit(import_catalog_file, file_info): file_info[0] for file_info in files} + results = [] + for future in as_completed(futures): + results.append(future.result()) + + # Print results in alphabetical order + failed_count = 0 + for filename, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {filename}: {error_type} - {error_msg}") failed_count += 1 + else: + print(f" Importing: {filename}") if failed_count > 0: print(f"\n Total catalog import failures: {failed_count}") From f16308aee52ef8a5924d311b7416a38b5c0ffae0 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 16:17:09 -0800 Subject: [PATCH 13/16] feat: add comprehensive import summary and retry commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - All import functions now return statistics (type, imported count, failures) - Import summary section shows: - Per-type import counts and failures - Total imported and failed counts - Failed imports section lists: - Full file paths for all failures - Error type and message for each - Retry commands section provides: - Ready-to-run cortex commands for each failed file - Can copy/paste directly from terminal - Commands use proper quoting for file paths with spaces - Updated all import functions to track file paths in parallel execution - Added typer.Exit exception handling to plugins, scorecards, workflows - Consistent error reporting across all import types Example output: IMPORT SUMMARY catalog: 250 imported, 5 failed TOTAL: 500 imported, 5 failed FAILED IMPORTS /path/to/file.yaml Error: HTTP - Validation or HTTP error RETRY COMMANDS cortex catalog create -f "/path/to/file.yaml" 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 174 +++++++++++++++++++++++------- 1 file changed, 135 insertions(+), 39 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index 061ddc9..ef5e294 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -315,22 +315,38 @@ def export( print("Contents available in " + directory) def _import_ip_allowlist(ctx, directory): + imported = 0 + failed = [] if os.path.isdir(directory): print("Processing: " + directory) for filename in os.listdir(directory): file_path = os.path.join(directory, filename) if os.path.isfile(file_path): - print(" Importing: " + filename) - ip_allowlist.replace(ctx, file_input=open(file_path), addresses=None, force=False, _print=False) + try: + print(" Importing: " + filename) + ip_allowlist.replace(ctx, file_input=open(file_path), addresses=None, force=False, _print=False) + imported += 1 + except Exception as e: + print(f" Failed to import {filename}: {type(e).__name__} - {str(e)}") + failed.append((file_path, type(e).__name__, str(e))) + return ("ip-allowlist", imported, failed) def _import_entity_types(ctx, force, directory): + imported = 0 + failed = [] if os.path.isdir(directory): print("Processing: " + directory) for filename in sorted(os.listdir(directory)): file_path = os.path.join(directory, filename) if os.path.isfile(file_path): - print(" Importing: " + filename) - entity_types.create(ctx, file_input=open(file_path), force=force) + try: + print(" Importing: " + filename) + entity_types.create(ctx, file_input=open(file_path), force=force) + imported += 1 + except Exception as e: + print(f" Failed to import {filename}: {type(e).__name__} - {str(e)}") + failed.append((file_path, type(e).__name__, str(e))) + return ("entity-types", imported, failed) def _import_entity_relationship_types(ctx, directory): if os.path.isdir(directory): @@ -355,11 +371,11 @@ def import_rel_type_file(file_info): else: # Create new relationship type entity_relationship_types.create(ctx, file_input=open(file_path), _print=False) - return (tag, None, None) + return (tag, file_path, None, None) except typer.Exit as e: - return (tag, "HTTP", "Validation or HTTP error") + return (tag, file_path, "HTTP", "Validation or HTTP error") except Exception as e: - return (tag, type(e).__name__, str(e)) + return (tag, file_path, type(e).__name__, str(e)) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -370,7 +386,7 @@ def import_rel_type_file(file_info): # Print results in alphabetical order failed_count = 0 - for tag, error_type, error_msg in sorted(results, key=lambda x: x[0]): + for tag, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): if error_type: print(f" Failed to import {tag}: {error_type} - {error_msg}") failed_count += 1 @@ -380,6 +396,8 @@ def import_rel_type_file(file_info): if failed_count > 0: print(f"\n Total entity relationship type import failures: {failed_count}") + return ("entity-relationship-types", len(results) - failed_count, [(fp, et, em) for tag, fp, et, em in results if et]) + def _import_entity_relationships(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) @@ -420,11 +438,11 @@ def import_relationships_file(file_info): finally: os.unlink(temp_file_name) - return (rel_type, None, None) + return (rel_type, file_path, None, None) except typer.Exit as e: - return (rel_type, "HTTP", "Validation or HTTP error") + return (rel_type, file_path, "HTTP", "Validation or HTTP error") except Exception as e: - return (rel_type, type(e).__name__, str(e)) + return (rel_type, file_path, type(e).__name__, str(e)) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -435,7 +453,7 @@ def import_relationships_file(file_info): # Print results in alphabetical order failed_count = 0 - for rel_type, error_type, error_msg in sorted(results, key=lambda x: x[0]): + for rel_type, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): if error_type: print(f" Failed to import {rel_type}: {error_type} - {error_msg}") failed_count += 1 @@ -445,6 +463,8 @@ def import_relationships_file(file_info): if failed_count > 0: print(f"\n Total entity relationship import failures: {failed_count}") + return ("entity-relationships", len(results) - failed_count, [(fp, et, em) for rt, fp, et, em in results if et]) + def _import_catalog(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) @@ -457,12 +477,12 @@ def import_catalog_file(file_info): try: with open(file_path) as f: catalog.create(ctx, file_input=f, _print=False) - return (filename, None, None) + return (filename, file_path, None, None) except typer.Exit as e: # typer.Exit is raised by the HTTP client on errors - return (filename, "HTTP", "Validation or HTTP error") + return (filename, file_path, "HTTP", "Validation or HTTP error") except Exception as e: - return (filename, type(e).__name__, str(e)) + return (filename, file_path, type(e).__name__, str(e)) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -473,7 +493,7 @@ def import_catalog_file(file_info): # Print results in alphabetical order failed_count = 0 - for filename, error_type, error_msg in sorted(results, key=lambda x: x[0]): + for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): if error_type: print(f" Failed to import {filename}: {error_type} - {error_msg}") failed_count += 1 @@ -483,6 +503,8 @@ def import_catalog_file(file_info): if failed_count > 0: print(f"\n Total catalog import failures: {failed_count}") + return ("catalog", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et]) + def _import_plugins(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) @@ -495,9 +517,11 @@ def import_plugin_file(file_info): try: with open(file_path) as f: plugins.create(ctx, file_input=f, force=True) - return (filename, None) + return (filename, file_path, None, None) + except typer.Exit as e: + return (filename, file_path, "HTTP", "Validation or HTTP error") except Exception as e: - return (filename, str(e)) + return (filename, file_path, type(e).__name__, str(e)) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -507,12 +531,16 @@ def import_plugin_file(file_info): results.append(future.result()) # Print results in alphabetical order - for filename, error in sorted(results, key=lambda x: x[0]): - if error: - print(f" Failed to import {filename}: {error}") + failed_count = 0 + for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {filename}: {error_type} - {error_msg}") + failed_count += 1 else: print(f" Importing: {filename}") + return ("plugins", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et]) + def _import_scorecards(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) @@ -525,9 +553,11 @@ def import_scorecard_file(file_info): try: with open(file_path) as f: scorecards.create(ctx, file_input=f, dry_run=False) - return (filename, None) + return (filename, file_path, None, None) + except typer.Exit as e: + return (filename, file_path, "HTTP", "Validation or HTTP error") except Exception as e: - return (filename, str(e)) + return (filename, file_path, type(e).__name__, str(e)) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -537,12 +567,16 @@ def import_scorecard_file(file_info): results.append(future.result()) # Print results in alphabetical order - for filename, error in sorted(results, key=lambda x: x[0]): - if error: - print(f" Failed to import {filename}: {error}") + failed_count = 0 + for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {filename}: {error_type} - {error_msg}") + failed_count += 1 else: print(f" Importing: {filename}") + return ("scorecards", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et]) + def _import_workflows(ctx, directory): if os.path.isdir(directory): print("Processing: " + directory) @@ -555,9 +589,11 @@ def import_workflow_file(file_info): try: with open(file_path) as f: workflows.create(ctx, file_input=f) - return (filename, None) + return (filename, file_path, None, None) + except typer.Exit as e: + return (filename, file_path, "HTTP", "Validation or HTTP error") except Exception as e: - return (filename, str(e)) + return (filename, file_path, type(e).__name__, str(e)) # Import all files in parallel with ThreadPoolExecutor(max_workers=30) as executor: @@ -567,12 +603,16 @@ def import_workflow_file(file_info): results.append(future.result()) # Print results in alphabetical order - for filename, error in sorted(results, key=lambda x: x[0]): - if error: - print(f" Failed to import {filename}: {error}") + failed_count = 0 + for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): + if error_type: + print(f" Failed to import {filename}: {error_type} - {error_msg}") + failed_count += 1 else: print(f" Importing: {filename}") + return ("workflows", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et]) + @app.command("import") def import_tenant( ctx: typer.Context, @@ -585,11 +625,67 @@ def import_tenant( client = ctx.obj["client"] - _import_ip_allowlist(ctx, directory + "/ip-allowlist") - _import_entity_types(ctx, force, directory + "/entity-types") - _import_entity_relationship_types(ctx, directory + "/entity-relationship-types") - _import_catalog(ctx, directory + "/catalog") - _import_entity_relationships(ctx, directory + "/entity-relationships") - _import_plugins(ctx, directory + "/plugins") - _import_scorecards(ctx, directory + "/scorecards") - _import_workflows(ctx, directory + "/workflows") + # Collect statistics from each import + all_stats = [] + all_stats.append(_import_ip_allowlist(ctx, directory + "/ip-allowlist")) + all_stats.append(_import_entity_types(ctx, force, directory + "/entity-types")) + all_stats.append(_import_entity_relationship_types(ctx, directory + "/entity-relationship-types")) + all_stats.append(_import_catalog(ctx, directory + "/catalog")) + all_stats.append(_import_entity_relationships(ctx, directory + "/entity-relationships")) + all_stats.append(_import_plugins(ctx, directory + "/plugins")) + all_stats.append(_import_scorecards(ctx, directory + "/scorecards")) + all_stats.append(_import_workflows(ctx, directory + "/workflows")) + + # Print summary + print("\n" + "="*80) + print("IMPORT SUMMARY") + print("="*80) + + total_imported = 0 + total_failed = 0 + all_failures = [] + + for import_type, imported, failed in all_stats: + if imported > 0 or len(failed) > 0: + total_imported += imported + total_failed += len(failed) + print(f"\n{import_type}:") + print(f" Imported: {imported}") + if len(failed) > 0: + print(f" Failed: {len(failed)}") + all_failures.extend([(import_type, f, e, m) for f, e, m in failed]) + + print(f"\nTOTAL: {total_imported} imported, {total_failed} failed") + + if len(all_failures) > 0: + print("\n" + "="*80) + print("FAILED IMPORTS") + print("="*80) + print("\nThe following files failed to import:\n") + + for import_type, file_path, error_type, error_msg in all_failures: + print(f" {file_path}") + print(f" Error: {error_type} - {error_msg}") + + print("\n" + "="*80) + print("RETRY COMMANDS") + print("="*80) + print("\nTo retry failed imports, run these commands:\n") + + for import_type, file_path, error_type, error_msg in all_failures: + if import_type == "catalog": + print(f"cortex catalog create -f \"{file_path}\"") + elif import_type == "entity-types": + print(f"cortex entity-types create --force -f \"{file_path}\"") + elif import_type == "entity-relationship-types": + tag = os.path.basename(file_path).replace('.json', '') + print(f"cortex entity-relationship-types create -f \"{file_path}\"") + elif import_type == "entity-relationships": + # These need special handling - would need the relationship type + print(f"# Manual retry needed for entity-relationships: {file_path}") + elif import_type == "plugins": + print(f"cortex plugins create --force -f \"{file_path}\"") + elif import_type == "scorecards": + print(f"cortex scorecards create -f \"{file_path}\"") + elif import_type == "workflows": + print(f"cortex workflows create -f \"{file_path}\"") From 26d1d4c34dd5725e09cc751c6af359d5d3659605 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 16:19:43 -0800 Subject: [PATCH 14/16] fix: show catalog filename before import attempt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Print "Importing: filename" before attempting catalog import - HTTP errors now appear immediately after the filename - Remove duplicate success messages at end - Only show failure count summary This makes it immediately clear which file is causing each HTTP 400 error: Before: Processing: .../catalog HTTP Error 400: Unknown error HTTP Error 400: Unknown error After: Processing: .../catalog Importing: docs.yaml HTTP Error 400: Unknown error Importing: another.yaml HTTP Error 400: Unknown error 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index ef5e294..d4f9d78 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -474,6 +474,7 @@ def _import_catalog(ctx, directory): def import_catalog_file(file_info): filename, file_path = file_info + print(f" Importing: {filename}") try: with open(file_path) as f: catalog.create(ctx, file_input=f, _print=False) @@ -491,14 +492,8 @@ def import_catalog_file(file_info): for future in as_completed(futures): results.append(future.result()) - # Print results in alphabetical order - failed_count = 0 - for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]): - if error_type: - print(f" Failed to import {filename}: {error_type} - {error_msg}") - failed_count += 1 - else: - print(f" Importing: {filename}") + # Count failures + failed_count = sum(1 for filename, file_path, error_type, error_msg in results if error_type) if failed_count > 0: print(f"\n Total catalog import failures: {failed_count}") From 792d03d49cab39dc184049ac0df9343618f9da63 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 17:03:34 -0800 Subject: [PATCH 15/16] fix: improve test isolation for custom events list test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Delete all event types (not just VALIDATE_SERVICE) at test start to prevent interference from parallel tests. The connection pooling performance improvements made tests run much faster, increasing temporal overlap between parallel tests and exposing this existing test isolation issue. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- tests/test_custom_events_list.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_custom_events_list.py b/tests/test_custom_events_list.py index cc0a555..3cef50f 100644 --- a/tests/test_custom_events_list.py +++ b/tests/test_custom_events_list.py @@ -1,7 +1,8 @@ from tests.helpers.utils import * def test(): - cli(["custom-events", "delete-all", "-t", "cli-test-service", "-y", "VALIDATE_SERVICE"]) + # Delete all event types to ensure clean state (not just VALIDATE_SERVICE) + cli(["custom-events", "delete-all", "-t", "cli-test-service"]) cli(["custom-events", "create", "-t", "cli-test-service", "-f", "data/run-time/custom-events.json"]) result = cli(["custom-events", "list", "-t", "cli-test-service"]) From d4ea29c1d8a03f143d260214e7d0b61e91473345 Mon Sep 17 00:00:00 2001 From: Jeff Schnitter Date: Wed, 5 Nov 2025 17:17:40 -0800 Subject: [PATCH 16/16] fix: initialize variables before conditional to prevent NameError MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When entity-relationship-types or entity-relationships directories don't exist (like in test data), the import functions would reference undefined `results` and `failed_count` variables, causing a NameError and preventing subsequent imports from running (including catalog import, breaking tests). This bug was causing test_catalog_delete_entity and test_custom_events_list to fail because the import would crash before importing catalog entities. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- cortexapps_cli/commands/backup.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py index d4f9d78..f5245de 100644 --- a/cortexapps_cli/commands/backup.py +++ b/cortexapps_cli/commands/backup.py @@ -349,6 +349,9 @@ def _import_entity_types(ctx, force, directory): return ("entity-types", imported, failed) def _import_entity_relationship_types(ctx, directory): + results = [] + failed_count = 0 + if os.path.isdir(directory): print("Processing: " + directory) @@ -399,6 +402,9 @@ def import_rel_type_file(file_info): return ("entity-relationship-types", len(results) - failed_count, [(fp, et, em) for tag, fp, et, em in results if et]) def _import_entity_relationships(ctx, directory): + results = [] + failed_count = 0 + if os.path.isdir(directory): print("Processing: " + directory)