diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 5d88da0..8ef0dc3 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -53,7 +53,7 @@ jobs:
- name: Generate HISTORY.md
run: |
- git-changelog > HISTORY.md
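+          # -c angular groups conventional (Angular-style) commit messages into changelog sections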
+ git-changelog -c angular > HISTORY.md
cat HISTORY.md
- name: Commit and Push
diff --git a/HISTORY.md b/HISTORY.md
index 0f3a9e8..14ba25d 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -6,6 +6,17 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+## [1.3.0](https://github.com/cortexapps/cli/releases/tag/1.3.0) - 2025-11-05
+
+[Compare with 1.2.0](https://github.com/cortexapps/cli/compare/1.2.0...1.3.0)
+
+### Fixed
+
+- fix: add retry logic for scorecard create to handle active evaluations ([cc40b55](https://github.com/cortexapps/cli/commit/cc40b55ed9ef5af4146360b5a879afc6dc67fe06) by Jeff Schnitter).
+- fix: use json.dump instead of Rich print for file writing ([c66c2fe](https://github.com/cortexapps/cli/commit/c66c2fe438cc95f8343fbd4ba3cecae605c435ea) by Jeff Schnitter).
+- fix: ensure export/import output is in alphabetical order ([9055f78](https://github.com/cortexapps/cli/commit/9055f78cc4e1136da20e4e42883ff3c0f248825b) by Jeff Schnitter).
+- fix: ensure CORTEX_BASE_URL is available in publish workflow ([743579d](https://github.com/cortexapps/cli/commit/743579d760e900da693696df2841e7b710b08d39) by Jeff Schnitter).
+
## [1.2.0](https://github.com/cortexapps/cli/releases/tag/1.2.0) - 2025-11-04
[Compare with 1.1.0](https://github.com/cortexapps/cli/compare/1.1.0...1.2.0)
diff --git a/Justfile b/Justfile
index 2dca61a..de6a855 100644
--- a/Justfile
+++ b/Justfile
@@ -13,7 +13,7 @@ _setup:
# Run all tests
test-all: _setup test-import
- {{pytest}} -n auto -m "not setup" --html=report.html --self-contained-html --cov=cortexapps_cli --cov-append --cov-report term-missing tests
+ {{pytest}} -n auto --dist loadfile -m "not setup" --html=report.html --self-contained-html --cov=cortexapps_cli --cov-append --cov-report term-missing tests
# Run all tests serially - helpful to see if any tests seem to be hanging
_test-all-individual: test-import
diff --git a/cortexapps_cli/cli.py b/cortexapps_cli/cli.py
index 03d471d..d6f26a3 100755
--- a/cortexapps_cli/cli.py
+++ b/cortexapps_cli/cli.py
@@ -24,6 +24,8 @@
import cortexapps_cli.commands.discovery_audit as discovery_audit
import cortexapps_cli.commands.docs as docs
+import cortexapps_cli.commands.entity_relationship_types as entity_relationship_types
+import cortexapps_cli.commands.entity_relationships as entity_relationships
 import cortexapps_cli.commands.entity_types as entity_types
import cortexapps_cli.commands.gitops_logs as gitops_logs
import cortexapps_cli.commands.groups as groups
import cortexapps_cli.commands.initiatives as initiatives
@@ -36,6 +38,7 @@
import cortexapps_cli.commands.rest as rest
import cortexapps_cli.commands.scim as scim
import cortexapps_cli.commands.scorecards as scorecards
+import cortexapps_cli.commands.secrets as secrets
import cortexapps_cli.commands.teams as teams
import cortexapps_cli.commands.workflows as workflows
@@ -58,6 +61,8 @@
app.add_typer(discovery_audit.app, name="discovery-audit")
app.add_typer(docs.app, name="docs")
+app.add_typer(entity_relationship_types.app, name="entity-relationship-types")
+app.add_typer(entity_relationships.app, name="entity-relationships")
 app.add_typer(entity_types.app, name="entity-types")
app.add_typer(gitops_logs.app, name="gitops-logs")
app.add_typer(groups.app, name="groups")
app.add_typer(initiatives.app, name="initiatives")
@@ -70,6 +75,7 @@
app.add_typer(rest.app, name="rest")
app.add_typer(scim.app, name="scim")
app.add_typer(scorecards.app, name="scorecards")
+app.add_typer(secrets.app, name="secrets")
app.add_typer(teams.app, name="teams")
app.add_typer(workflows.app, name="workflows")
diff --git a/cortexapps_cli/commands/backup.py b/cortexapps_cli/commands/backup.py
index a09fdc8..f5245de 100644
--- a/cortexapps_cli/commands/backup.py
+++ b/cortexapps_cli/commands/backup.py
@@ -5,6 +5,10 @@
import typer
import json
import os
+import tempfile
from rich import print, print_json
from rich.console import Console
from enum import Enum
@@ -14,6 +18,8 @@
import cortexapps_cli.commands.scorecards as scorecards
import cortexapps_cli.commands.catalog as catalog
import cortexapps_cli.commands.entity_types as entity_types
+import cortexapps_cli.commands.entity_relationship_types as entity_relationship_types
+import cortexapps_cli.commands.entity_relationships as entity_relationships
import cortexapps_cli.commands.ip_allowlist as ip_allowlist
import cortexapps_cli.commands.plugins as plugins
import cortexapps_cli.commands.workflows as workflows
@@ -93,15 +99,39 @@ def _export_entity_types(ctx, directory):
for definition in definitions_sorted:
tag = definition['type']
- json_string = json.dumps(definition, indent=4)
- _file_name(directory, tag, json_string, "json")
+ _file_name(directory, tag, definition, "json")
def _export_ip_allowlist(ctx, directory):
directory = _directory_name(directory, "ip-allowlist")
file = directory + "/ip-allowlist.json"
content = ip_allowlist.get(ctx, page=None, page_size=None, _print=False)
- _file_name(directory, "ip-allowlist", str(content), "json")
+ _file_name(directory, "ip-allowlist", content, "json")
+
+def _export_entity_relationship_types(ctx, directory):
+ directory = _directory_name(directory, "entity-relationship-types")
+
+ data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False)
+ relationship_types_sorted = sorted(data['relationshipTypes'], key=lambda x: x["tag"])
+
+ for rel_type in relationship_types_sorted:
+ tag = rel_type['tag']
+ _file_name(directory, tag, rel_type, "json")
+
+def _export_entity_relationships(ctx, directory):
+ directory = _directory_name(directory, "entity-relationships")
+
+ # First get all relationship types
+ rel_types_data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False)
+ rel_types = [rt['tag'] for rt in rel_types_data['relationshipTypes']]
+
+ # For each relationship type, export all relationships
+ for rel_type in sorted(rel_types):
+ data = entity_relationships.list(ctx, relationship_type=rel_type, page=None, page_size=250, _print=False)
+ relationships = data.get('relationships', [])
+
+ if relationships:
+ _file_name(directory, rel_type, relationships, "json")
def _export_plugins(ctx, directory):
directory = _directory_name(directory, "plugins")
@@ -179,6 +209,8 @@ def _export_workflows(ctx, directory):
backupTypes = {
"catalog",
"entity-types",
+ "entity-relationship-types",
+ "entity-relationships",
"ip-allowlist",
"plugins",
"scorecards",
@@ -226,6 +258,8 @@ def export(
Exports the following objects:
- catalog
- entity-types
+ - entity-relationship-types
+ - entity-relationships
- ip-allowlist
- plugins
- scorecards
@@ -240,14 +274,13 @@ def export(
cortex backup export --export-types catalog --catalog-types AWS::S3::Bucket
It does not back up everything in the tenant. For example these objects are not backed up:
- - api-keys
+ - api-keys
- custom-events
- custom-metadata created by the public API
- custom-metrics
- dependencies created by the API
- deploys
- docs created by the API
- - entity-relationships created by the API
- groups added by the API
- packages
- secrets
@@ -265,6 +298,10 @@ def export(
_export_catalog(ctx, directory, catalog_types)
if "entity-types" in export_types:
_export_entity_types(ctx, directory)
+ if "entity-relationship-types" in export_types:
+ _export_entity_relationship_types(ctx, directory)
+ if "entity-relationships" in export_types:
+ _export_entity_relationships(ctx, directory)
if "ip-allowlist" in export_types:
_export_ip_allowlist(ctx, directory)
if "plugins" in export_types:
@@ -278,22 +315,161 @@ def export(
print("Contents available in " + directory)
def _import_ip_allowlist(ctx, directory):
+ imported = 0
+ failed = []
if os.path.isdir(directory):
print("Processing: " + directory)
for filename in os.listdir(directory):
file_path = os.path.join(directory, filename)
if os.path.isfile(file_path):
- print(" Importing: " + filename)
- ip_allowlist.replace(ctx, file_input=open(file_path), addresses=None, force=False, _print=False)
+ try:
+ print(" Importing: " + filename)
+                    with open(file_path) as f:
+                        ip_allowlist.replace(ctx, file_input=f, addresses=None, force=False, _print=False)
+ imported += 1
+ except Exception as e:
+ print(f" Failed to import {filename}: {type(e).__name__} - {str(e)}")
+ failed.append((file_path, type(e).__name__, str(e)))
+ return ("ip-allowlist", imported, failed)
def _import_entity_types(ctx, force, directory):
+ imported = 0
+ failed = []
if os.path.isdir(directory):
print("Processing: " + directory)
for filename in sorted(os.listdir(directory)):
file_path = os.path.join(directory, filename)
if os.path.isfile(file_path):
- print(" Importing: " + filename)
- entity_types.create(ctx, file_input=open(file_path), force=force)
+ try:
+ print(" Importing: " + filename)
+                    with open(file_path) as f:
+                        entity_types.create(ctx, file_input=f, force=force)
+ imported += 1
+ except Exception as e:
+ print(f" Failed to import {filename}: {type(e).__name__} - {str(e)}")
+ failed.append((file_path, type(e).__name__, str(e)))
+ return ("entity-types", imported, failed)
+
+def _import_entity_relationship_types(ctx, directory):
+ results = []
+ failed_count = 0
+
+ if os.path.isdir(directory):
+ print("Processing: " + directory)
+
+ # Get list of existing relationship types
+ existing_rel_types_data = entity_relationship_types.list(ctx, page=None, page_size=250, _print=False)
+ existing_tags = {rt['tag'] for rt in existing_rel_types_data.get('relationshipTypes', [])}
+
+ files = [(filename, os.path.join(directory, filename))
+ for filename in sorted(os.listdir(directory))
+ if os.path.isfile(os.path.join(directory, filename))]
+
+ def import_rel_type_file(file_info):
+ filename, file_path = file_info
+ tag = filename.replace('.json', '')
+ try:
+                # Update the relationship type if it already exists, otherwise create it
+                with open(file_path) as f:
+                    if tag in existing_tags:
+                        entity_relationship_types.update(ctx, tag=tag, file_input=f, _print=False)
+                    else:
+                        entity_relationship_types.create(ctx, file_input=f, _print=False)
+ return (tag, file_path, None, None)
+            except typer.Exit:
+ return (tag, file_path, "HTTP", "Validation or HTTP error")
+ except Exception as e:
+ return (tag, file_path, type(e).__name__, str(e))
+
+ # Import all files in parallel
+ with ThreadPoolExecutor(max_workers=30) as executor:
+ futures = {executor.submit(import_rel_type_file, file_info): file_info[0] for file_info in files}
+ results = []
+ for future in as_completed(futures):
+ results.append(future.result())
+
+ # Print results in alphabetical order
+ failed_count = 0
+ for tag, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]):
+ if error_type:
+ print(f" Failed to import {tag}: {error_type} - {error_msg}")
+ failed_count += 1
+ else:
+ print(f" Importing: {tag}")
+
+ if failed_count > 0:
+ print(f"\n Total entity relationship type import failures: {failed_count}")
+
+ return ("entity-relationship-types", len(results) - failed_count, [(fp, et, em) for tag, fp, et, em in results if et])
+
+def _import_entity_relationships(ctx, directory):
+ results = []
+ failed_count = 0
+
+ if os.path.isdir(directory):
+ print("Processing: " + directory)
+
+ files = [(filename, os.path.join(directory, filename))
+ for filename in sorted(os.listdir(directory))
+ if os.path.isfile(os.path.join(directory, filename))]
+
+ def import_relationships_file(file_info):
+ filename, file_path = file_info
+ rel_type = filename.replace('.json', '')
+ try:
+ # Read the relationships file
+ with open(file_path) as f:
+ relationships = json.load(f)
+
+ # Convert list format to the format expected by update-bulk
+ # The export saves the raw relationships list, but update-bulk needs {"relationships": [...]}
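+                # e.g. [{"sourceEntity": {"tag": "svc-a"}, "destinationEntity": {"tag": "svc-b"}}, ...]
+                #      becomes {"relationships": [{"source": "svc-a", "destination": "svc-b"}]}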
+ if isinstance(relationships, list):
+ data = {"relationships": []}
+ for rel in relationships:
+ # Extract source and destination tags from sourceEntity and destinationEntity
+ source_tag = rel.get("sourceEntity", {}).get("tag")
+ dest_tag = rel.get("destinationEntity", {}).get("tag")
+ data["relationships"].append({
+ "source": source_tag,
+ "destination": dest_tag
+ })
+
+ # Use update-bulk to replace all relationships for this type
+ # Create a temporary file to pass the data
+ with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as temp_file:
+ json.dump(data, temp_file)
+ temp_file_name = temp_file.name
+
+                try:
+                    with open(temp_file_name) as tf:
+                        entity_relationships.update_bulk(ctx, relationship_type=rel_type, file_input=tf, force=True, _print=False)
+                finally:
+                    os.unlink(temp_file_name)
+
+ return (rel_type, file_path, None, None)
+            except typer.Exit:
+ return (rel_type, file_path, "HTTP", "Validation or HTTP error")
+ except Exception as e:
+ return (rel_type, file_path, type(e).__name__, str(e))
+
+ # Import all files in parallel
+ with ThreadPoolExecutor(max_workers=30) as executor:
+ futures = {executor.submit(import_relationships_file, file_info): file_info[0] for file_info in files}
+ results = []
+ for future in as_completed(futures):
+ results.append(future.result())
+
+ # Print results in alphabetical order
+ failed_count = 0
+ for rel_type, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]):
+ if error_type:
+ print(f" Failed to import {rel_type}: {error_type} - {error_msg}")
+ failed_count += 1
+ else:
+ print(f" Importing: {rel_type}")
+
+ if failed_count > 0:
+ print(f"\n Total entity relationship import failures: {failed_count}")
+
+ return ("entity-relationships", len(results) - failed_count, [(fp, et, em) for rt, fp, et, em in results if et])
def _import_catalog(ctx, directory):
if os.path.isdir(directory):
@@ -304,12 +480,16 @@ def _import_catalog(ctx, directory):
def import_catalog_file(file_info):
filename, file_path = file_info
+ print(f" Importing: {filename}")
try:
with open(file_path) as f:
catalog.create(ctx, file_input=f, _print=False)
- return (filename, None)
+ return (filename, file_path, None, None)
+            except typer.Exit:
+ # typer.Exit is raised by the HTTP client on errors
+ return (filename, file_path, "HTTP", "Validation or HTTP error")
except Exception as e:
- return (filename, str(e))
+ return (filename, file_path, type(e).__name__, str(e))
# Import all files in parallel
with ThreadPoolExecutor(max_workers=30) as executor:
@@ -318,12 +498,13 @@ def import_catalog_file(file_info):
for future in as_completed(futures):
results.append(future.result())
- # Print results in alphabetical order
- for filename, error in sorted(results, key=lambda x: x[0]):
- if error:
- print(f" Failed to import {filename}: {error}")
- else:
- print(f" Importing: {filename}")
+ # Count failures
+ failed_count = sum(1 for filename, file_path, error_type, error_msg in results if error_type)
+
+ if failed_count > 0:
+ print(f"\n Total catalog import failures: {failed_count}")
+
+ return ("catalog", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et])
def _import_plugins(ctx, directory):
if os.path.isdir(directory):
@@ -337,9 +518,11 @@ def import_plugin_file(file_info):
try:
with open(file_path) as f:
plugins.create(ctx, file_input=f, force=True)
- return (filename, None)
+ return (filename, file_path, None, None)
+            except typer.Exit:
+ return (filename, file_path, "HTTP", "Validation or HTTP error")
except Exception as e:
- return (filename, str(e))
+ return (filename, file_path, type(e).__name__, str(e))
# Import all files in parallel
with ThreadPoolExecutor(max_workers=30) as executor:
@@ -349,12 +532,16 @@ def import_plugin_file(file_info):
results.append(future.result())
# Print results in alphabetical order
- for filename, error in sorted(results, key=lambda x: x[0]):
- if error:
- print(f" Failed to import {filename}: {error}")
+ failed_count = 0
+ for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]):
+ if error_type:
+ print(f" Failed to import {filename}: {error_type} - {error_msg}")
+ failed_count += 1
else:
print(f" Importing: {filename}")
+ return ("plugins", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et])
+
def _import_scorecards(ctx, directory):
if os.path.isdir(directory):
print("Processing: " + directory)
@@ -367,9 +554,11 @@ def import_scorecard_file(file_info):
try:
with open(file_path) as f:
scorecards.create(ctx, file_input=f, dry_run=False)
- return (filename, None)
+ return (filename, file_path, None, None)
+            except typer.Exit:
+ return (filename, file_path, "HTTP", "Validation or HTTP error")
except Exception as e:
- return (filename, str(e))
+ return (filename, file_path, type(e).__name__, str(e))
# Import all files in parallel
with ThreadPoolExecutor(max_workers=30) as executor:
@@ -379,12 +568,16 @@ def import_scorecard_file(file_info):
results.append(future.result())
# Print results in alphabetical order
- for filename, error in sorted(results, key=lambda x: x[0]):
- if error:
- print(f" Failed to import {filename}: {error}")
+ failed_count = 0
+ for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]):
+ if error_type:
+ print(f" Failed to import {filename}: {error_type} - {error_msg}")
+ failed_count += 1
else:
print(f" Importing: {filename}")
+ return ("scorecards", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et])
+
def _import_workflows(ctx, directory):
if os.path.isdir(directory):
print("Processing: " + directory)
@@ -397,9 +590,11 @@ def import_workflow_file(file_info):
try:
with open(file_path) as f:
workflows.create(ctx, file_input=f)
- return (filename, None)
+ return (filename, file_path, None, None)
+            except typer.Exit:
+ return (filename, file_path, "HTTP", "Validation or HTTP error")
except Exception as e:
- return (filename, str(e))
+ return (filename, file_path, type(e).__name__, str(e))
# Import all files in parallel
with ThreadPoolExecutor(max_workers=30) as executor:
@@ -409,12 +604,16 @@ def import_workflow_file(file_info):
results.append(future.result())
# Print results in alphabetical order
- for filename, error in sorted(results, key=lambda x: x[0]):
- if error:
- print(f" Failed to import {filename}: {error}")
+ failed_count = 0
+ for filename, file_path, error_type, error_msg in sorted(results, key=lambda x: x[0]):
+ if error_type:
+ print(f" Failed to import {filename}: {error_type} - {error_msg}")
+ failed_count += 1
else:
print(f" Importing: {filename}")
+ return ("workflows", len(results) - failed_count, [(fp, et, em) for fn, fp, et, em in results if et])
+
@app.command("import")
def import_tenant(
ctx: typer.Context,
@@ -427,9 +626,67 @@ def import_tenant(
client = ctx.obj["client"]
- _import_ip_allowlist(ctx, directory + "/ip-allowlist")
- _import_entity_types(ctx, force, directory + "/entity-types")
- _import_catalog(ctx, directory + "/catalog")
- _import_plugins(ctx, directory + "/plugins")
- _import_scorecards(ctx, directory + "/scorecards")
- _import_workflows(ctx, directory + "/workflows")
+ # Collect statistics from each import
+ all_stats = []
+ all_stats.append(_import_ip_allowlist(ctx, directory + "/ip-allowlist"))
+ all_stats.append(_import_entity_types(ctx, force, directory + "/entity-types"))
+ all_stats.append(_import_entity_relationship_types(ctx, directory + "/entity-relationship-types"))
+ all_stats.append(_import_catalog(ctx, directory + "/catalog"))
+ all_stats.append(_import_entity_relationships(ctx, directory + "/entity-relationships"))
+ all_stats.append(_import_plugins(ctx, directory + "/plugins"))
+ all_stats.append(_import_scorecards(ctx, directory + "/scorecards"))
+ all_stats.append(_import_workflows(ctx, directory + "/workflows"))
+
+ # Print summary
+ print("\n" + "="*80)
+ print("IMPORT SUMMARY")
+ print("="*80)
+
+ total_imported = 0
+ total_failed = 0
+ all_failures = []
+
+ for import_type, imported, failed in all_stats:
+ if imported > 0 or len(failed) > 0:
+ total_imported += imported
+ total_failed += len(failed)
+ print(f"\n{import_type}:")
+ print(f" Imported: {imported}")
+ if len(failed) > 0:
+ print(f" Failed: {len(failed)}")
+ all_failures.extend([(import_type, f, e, m) for f, e, m in failed])
+
+ print(f"\nTOTAL: {total_imported} imported, {total_failed} failed")
+
+ if len(all_failures) > 0:
+ print("\n" + "="*80)
+ print("FAILED IMPORTS")
+ print("="*80)
+ print("\nThe following files failed to import:\n")
+
+ for import_type, file_path, error_type, error_msg in all_failures:
+ print(f" {file_path}")
+ print(f" Error: {error_type} - {error_msg}")
+
+ print("\n" + "="*80)
+ print("RETRY COMMANDS")
+ print("="*80)
+ print("\nTo retry failed imports, run these commands:\n")
+
+ for import_type, file_path, error_type, error_msg in all_failures:
+ if import_type == "catalog":
+ print(f"cortex catalog create -f \"{file_path}\"")
+ elif import_type == "entity-types":
+ print(f"cortex entity-types create --force -f \"{file_path}\"")
+        elif import_type == "entity-relationship-types":
+            print(f"cortex entity-relationship-types create -f \"{file_path}\"")
+        elif import_type == "entity-relationships":
+            # The exported file must be converted before update-bulk can use it; re-run the import for these
+            print(f"# Manual retry needed for entity-relationships: {file_path}")
+ elif import_type == "plugins":
+ print(f"cortex plugins create --force -f \"{file_path}\"")
+ elif import_type == "scorecards":
+ print(f"cortex scorecards create -f \"{file_path}\"")
+ elif import_type == "workflows":
+ print(f"cortex workflows create -f \"{file_path}\"")
diff --git a/cortexapps_cli/commands/entity_relationship_types.py b/cortexapps_cli/commands/entity_relationship_types.py
new file mode 100644
index 0000000..692256a
--- /dev/null
+++ b/cortexapps_cli/commands/entity_relationship_types.py
@@ -0,0 +1,121 @@
+import typer
+import json
+from typing_extensions import Annotated
+from cortexapps_cli.utils import print_output_with_context
+from cortexapps_cli.command_options import CommandOptions, ListCommandOptions
+
+app = typer.Typer(
+ help="Entity Relationship Types commands",
+ no_args_is_help=True
+)
+
+@app.command()
+def list(
+ ctx: typer.Context,
+ _print: CommandOptions._print = True,
+ page: ListCommandOptions.page = None,
+ page_size: ListCommandOptions.page_size = 250,
+ table_output: ListCommandOptions.table_output = False,
+ csv_output: ListCommandOptions.csv_output = False,
+ columns: ListCommandOptions.columns = [],
+ no_headers: ListCommandOptions.no_headers = False,
+ filters: ListCommandOptions.filters = [],
+ sort: ListCommandOptions.sort = [],
+):
+ """
+ List entity relationship types
+ """
+ client = ctx.obj["client"]
+
+ params = {
+ "page": page,
+ "pageSize": page_size
+ }
+
+ if (table_output or csv_output) and not ctx.params.get('columns'):
+ ctx.params['columns'] = [
+ "Tag=tag",
+ "Name=name",
+ "Description=description",
+ ]
+
+ # remove any params that are None
+ params = {k: v for k, v in params.items() if v is not None}
+
+ if page is None:
+ r = client.fetch("api/v1/relationship-types", params=params)
+ else:
+ r = client.get("api/v1/relationship-types", params=params)
+
+ if _print:
+ print_output_with_context(ctx, r)
+ else:
+ return r
+
+@app.command()
+def get(
+ ctx: typer.Context,
+ tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"),
+):
+ """
+ Get a relationship type by tag
+ """
+ client = ctx.obj["client"]
+ r = client.get(f"api/v1/relationship-types/{tag}")
+ print_output_with_context(ctx, r)
+
+@app.command()
+def create(
+ ctx: typer.Context,
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationship type definition; can be passed as stdin with -, example: -f-")] = ...,
+ _print: CommandOptions._print = True,
+):
+ """
+ Create a relationship type
+
+ Provide a JSON file with the relationship type definition including required fields:
+ - tag: unique identifier
+ - name: human-readable name
+ - definitionLocation: SOURCE, DESTINATION, or BOTH
+ - allowCycles: boolean
+ - createCatalog: boolean
+ - isSingleSource: boolean
+ - isSingleDestination: boolean
+ - sourcesFilter: object with include/types configuration
+ - destinationsFilter: object with include/types configuration
+ - inheritances: array of inheritance settings
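+
+    A hypothetical minimal payload (values are illustrative; consult the Cortex API
+    docs for the exact sourcesFilter, destinationsFilter, and inheritances shapes):
+
+        {
+            "tag": "depends-on",
+            "name": "Depends On",
+            "definitionLocation": "SOURCE",
+            "allowCycles": false,
+            "createCatalog": false,
+            "isSingleSource": false,
+            "isSingleDestination": false,
+            "sourcesFilter": {},
+            "destinationsFilter": {},
+            "inheritances": []
+        }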
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+ r = client.post("api/v1/relationship-types", data=data)
+ if _print:
+ print_output_with_context(ctx, r)
+
+@app.command()
+def update(
+ ctx: typer.Context,
+ tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationship type definition; can be passed as stdin with -, example: -f-")] = ...,
+ _print: CommandOptions._print = True,
+):
+ """
+ Update a relationship type
+
+ Provide a JSON file with the relationship type definition to update.
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+ r = client.put(f"api/v1/relationship-types/{tag}", data=data)
+ if _print:
+ print_output_with_context(ctx, r)
+
+@app.command()
+def delete(
+ ctx: typer.Context,
+ tag: str = typer.Option(..., "--tag", "-t", help="Relationship type tag"),
+):
+ """
+ Delete a relationship type
+ """
+ client = ctx.obj["client"]
+ client.delete(f"api/v1/relationship-types/{tag}")
diff --git a/cortexapps_cli/commands/entity_relationships.py b/cortexapps_cli/commands/entity_relationships.py
new file mode 100644
index 0000000..e6ebcba
--- /dev/null
+++ b/cortexapps_cli/commands/entity_relationships.py
@@ -0,0 +1,222 @@
+import typer
+import json
+from typing_extensions import Annotated
+from cortexapps_cli.utils import print_output_with_context
+from cortexapps_cli.command_options import CommandOptions, ListCommandOptions
+
+app = typer.Typer(
+ help="Entity Relationships commands (Beta)",
+ no_args_is_help=True
+)
+
+@app.command()
+def list(
+ ctx: typer.Context,
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ _print: CommandOptions._print = True,
+ page: ListCommandOptions.page = None,
+ page_size: ListCommandOptions.page_size = 250,
+ table_output: ListCommandOptions.table_output = False,
+ csv_output: ListCommandOptions.csv_output = False,
+ columns: ListCommandOptions.columns = [],
+ no_headers: ListCommandOptions.no_headers = False,
+ filters: ListCommandOptions.filters = [],
+ sort: ListCommandOptions.sort = [],
+):
+ """
+ List all relationships for a given relationship type
+ """
+ client = ctx.obj["client"]
+
+ params = {
+ "page": page,
+ "pageSize": page_size
+ }
+
+ if (table_output or csv_output) and not ctx.params.get('columns'):
+ ctx.params['columns'] = [
+            "Source=sourceEntity.tag",
+            "Destination=destinationEntity.tag",
+ "Provider=providerType",
+ ]
+
+ # remove any params that are None
+ params = {k: v for k, v in params.items() if v is not None}
+
+ if page is None:
+ r = client.fetch(f"api/v1/relationships/{relationship_type}", params=params)
+ else:
+ r = client.get(f"api/v1/relationships/{relationship_type}", params=params)
+
+ if _print:
+ print_output_with_context(ctx, r)
+ else:
+ return r
+
+@app.command()
+def list_destinations(
+ ctx: typer.Context,
+ entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"),
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ depth: int = typer.Option(1, "--depth", "-d", help="Maximum hierarchy depth"),
+ include_archived: bool = typer.Option(False, "--include-archived", help="Include archived entities"),
+):
+ """
+ List destination entities for a given source entity and relationship type
+ """
+ client = ctx.obj["client"]
+
+ params = {
+ "depth": depth,
+ "includeArchived": include_archived
+ }
+
+ r = client.get(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/destinations", params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def list_sources(
+ ctx: typer.Context,
+ entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"),
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ depth: int = typer.Option(1, "--depth", "-d", help="Maximum hierarchy depth"),
+ include_archived: bool = typer.Option(False, "--include-archived", help="Include archived entities"),
+):
+ """
+ List source entities for a given destination entity and relationship type
+ """
+ client = ctx.obj["client"]
+
+ params = {
+ "depth": depth,
+ "includeArchived": include_archived
+ }
+
+ r = client.get(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/sources", params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def add_destinations(
+ ctx: typer.Context,
+ entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"),
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing destinations array; can be passed as stdin with -, example: -f-")] = ...,
+ force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"),
+):
+ """
+ Add destination entities for a given source entity
+
+ Provide a JSON file with: {"destinations": ["entity-1", "entity-2"]}
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+
+ params = {"force": force} if force else {}
+
+ r = client.post(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/destinations", data=data, params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def add_sources(
+ ctx: typer.Context,
+ entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"),
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing sources array; can be passed as stdin with -, example: -f-")] = ...,
+ force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"),
+):
+ """
+ Add source entities for a given destination entity
+
+ Provide a JSON file with: {"sources": ["entity-1", "entity-2"]}
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+
+ params = {"force": force} if force else {}
+
+ r = client.post(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/sources", data=data, params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def update_destinations(
+ ctx: typer.Context,
+ entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"),
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing destinations array; can be passed as stdin with -, example: -f-")] = ...,
+ force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"),
+):
+ """
+ Replace all destination entities for a given source entity
+
+ Provide a JSON file with: {"destinations": ["entity-1", "entity-2"]}
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+
+ params = {"force": force} if force else {}
+
+ r = client.put(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/destinations", data=data, params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def update_sources(
+ ctx: typer.Context,
+ entity_tag: str = typer.Option(..., "--entity-tag", "-e", help="Entity tag or ID"),
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing sources array; can be passed as stdin with -, example: -f-")] = ...,
+ force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"),
+):
+ """
+ Replace all source entities for a given destination entity
+
+ Provide a JSON file with: {"sources": ["entity-1", "entity-2"]}
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+
+ params = {"force": force} if force else {}
+
+ r = client.put(f"api/v1/catalog/{entity_tag}/relationships/{relationship_type}/sources", data=data, params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def add_bulk(
+ ctx: typer.Context,
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationships array; can be passed as stdin with -, example: -f-")] = ...,
+ force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"),
+):
+ """
+ Add multiple relationships in bulk
+
+ Provide a JSON file with: {"relationships": [{"source": "tag1", "destination": "tag2"}]}
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+
+ params = {"force": force} if force else {}
+
+ r = client.post(f"api/v1/relationships/{relationship_type}", data=data, params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def update_bulk(
+ ctx: typer.Context,
+ relationship_type: str = typer.Option(..., "--relationship-type", "-r", help="Relationship type tag"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing relationships array; can be passed as stdin with -, example: -f-")] = ...,
+ force: bool = typer.Option(False, "--force", help="Override catalog descriptor values"),
+ _print: CommandOptions._print = True,
+):
+ """
+ Replace all relationships for a given relationship type
+
+ Provide a JSON file with: {"relationships": [{"source": "tag1", "destination": "tag2"}]}
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+
+ params = {"force": force} if force else {}
+
+ r = client.put(f"api/v1/relationships/{relationship_type}", data=data, params=params)
+ if _print:
+ print_output_with_context(ctx, r)
diff --git a/cortexapps_cli/commands/secrets.py b/cortexapps_cli/commands/secrets.py
new file mode 100644
index 0000000..53162fc
--- /dev/null
+++ b/cortexapps_cli/commands/secrets.py
@@ -0,0 +1,105 @@
+import typer
+import json
+from typing_extensions import Annotated
+from cortexapps_cli.utils import print_output_with_context
+from cortexapps_cli.command_options import ListCommandOptions
+
+app = typer.Typer(
+ help="Secrets commands",
+ no_args_is_help=True
+)
+
+@app.command()
+def list(
+ ctx: typer.Context,
+ page: ListCommandOptions.page = None,
+ page_size: ListCommandOptions.page_size = 250,
+ table_output: ListCommandOptions.table_output = False,
+ csv_output: ListCommandOptions.csv_output = False,
+ columns: ListCommandOptions.columns = [],
+ no_headers: ListCommandOptions.no_headers = False,
+ filters: ListCommandOptions.filters = [],
+ sort: ListCommandOptions.sort = [],
+):
+ """
+ List secrets
+ """
+ client = ctx.obj["client"]
+
+ params = {
+ "page": page,
+ "pageSize": page_size
+ }
+
+ if (table_output or csv_output) and not ctx.params.get('columns'):
+ ctx.params['columns'] = [
+ "ID=id",
+ "Name=name",
+ "Tag=tag",
+ ]
+
+ # remove any params that are None
+ params = {k: v for k, v in params.items() if v is not None}
+
+ if page is None:
+ r = client.fetch("api/v1/secrets", params=params)
+ else:
+ r = client.get("api/v1/secrets", params=params)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def get(
+ ctx: typer.Context,
+ tag_or_id: str = typer.Option(..., "--tag-or-id", "-t", help="Secret tag or ID"),
+):
+ """
+ Get a secret by tag or ID
+ """
+ client = ctx.obj["client"]
+ r = client.get(f"api/v1/secrets/{tag_or_id}")
+ print_output_with_context(ctx, r)
+
+@app.command()
+def create(
+ ctx: typer.Context,
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing secret definition (name, secret, tag); can be passed as stdin with -, example: -f-")] = ...,
+):
+ """
+ Create a secret
+
+ Provide a JSON file with the secret definition including required fields:
+ - name: human-readable label for the secret
+ - secret: the actual secret value
+ - tag: unique identifier for the secret
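+
+    Example (matches data/run-time/secret-create.json used by the CLI tests):
+
+        {
+            "tag": "cli_test_secret",
+            "name": "CLI Test Secret",
+            "secret": "test-secret-value-12345"
+        }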
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+ r = client.post("api/v1/secrets", data=data)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def update(
+ ctx: typer.Context,
+ tag_or_id: str = typer.Option(..., "--tag-or-id", "-t", help="Secret tag or ID"),
+ file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing fields to update (name, secret); can be passed as stdin with -, example: -f-")] = ...,
+):
+ """
+ Update a secret
+
+    Provide a JSON file with the fields to update; both name and secret are optional.
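+
+    Example (matches data/run-time/secret-update.json used by the CLI tests):
+
+        {
+            "name": "Updated CLI Test Secret",
+            "secret": "updated-secret-value-67890"
+        }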
+ """
+ client = ctx.obj["client"]
+ data = json.loads("".join([line for line in file_input]))
+ r = client.put(f"api/v1/secrets/{tag_or_id}", data=data)
+ print_output_with_context(ctx, r)
+
+@app.command()
+def delete(
+ ctx: typer.Context,
+ tag_or_id: str = typer.Option(..., "--tag-or-id", "-t", help="Secret tag or ID"),
+):
+ """
+ Delete a secret
+ """
+ client = ctx.obj["client"]
+ client.delete(f"api/v1/secrets/{tag_or_id}")
diff --git a/data/import/scorecards/cli-test-evaluation-scorecard.yaml b/data/import/scorecards/cli-test-evaluation-scorecard.yaml
new file mode 100644
index 0000000..2524796
--- /dev/null
+++ b/data/import/scorecards/cli-test-evaluation-scorecard.yaml
@@ -0,0 +1,21 @@
+tag: cli-test-evaluation-scorecard
+name: CLI Test Evaluation Scorecard
+description: Used to test Cortex CLI trigger-evaluation command
+draft: false
+ladder:
+ name: Default Ladder
+ levels:
+ - name: You Made It
+ rank: 1
+ description: "My boring description"
+ color: 7cf376
+rules:
+- title: Has Custom Data
+ expression: custom("testField") != null
+ weight: 1
+ level: You Made It
+ filter:
+ category: SERVICE
+filter:
+ query: 'entity.tag() == "cli-test-service"'
+ category: SERVICE
diff --git a/data/run-time/secret-create.json b/data/run-time/secret-create.json
new file mode 100644
index 0000000..f4e803e
--- /dev/null
+++ b/data/run-time/secret-create.json
@@ -0,0 +1,5 @@
+{
+ "tag": "cli_test_secret",
+ "name": "CLI Test Secret",
+ "secret": "test-secret-value-12345"
+}
diff --git a/data/run-time/secret-update.json b/data/run-time/secret-update.json
new file mode 100644
index 0000000..e28781b
--- /dev/null
+++ b/data/run-time/secret-update.json
@@ -0,0 +1,4 @@
+{
+ "name": "Updated CLI Test Secret",
+ "secret": "updated-secret-value-67890"
+}
diff --git a/tests/test_deploys.py b/tests/test_000_deploys.py
similarity index 100%
rename from tests/test_deploys.py
rename to tests/test_000_deploys.py
diff --git a/tests/test_custom_events_list.py b/tests/test_custom_events_list.py
index cc0a555..3cef50f 100644
--- a/tests/test_custom_events_list.py
+++ b/tests/test_custom_events_list.py
@@ -1,7 +1,8 @@
from tests.helpers.utils import *
def test():
- cli(["custom-events", "delete-all", "-t", "cli-test-service", "-y", "VALIDATE_SERVICE"])
+ # Delete all event types to ensure clean state (not just VALIDATE_SERVICE)
+ cli(["custom-events", "delete-all", "-t", "cli-test-service"])
cli(["custom-events", "create", "-t", "cli-test-service", "-f", "data/run-time/custom-events.json"])
result = cli(["custom-events", "list", "-t", "cli-test-service"])
diff --git a/tests/test_scorecards.py b/tests/test_scorecards.py
index 801f556..7b5c991 100644
--- a/tests/test_scorecards.py
+++ b/tests/test_scorecards.py
@@ -4,25 +4,13 @@
# Get rule id to be used in exemption tests.
# TODO: check for and revoke any PENDING exemptions.
-@mock.patch.dict(os.environ, {"CORTEX_API_KEY": os.environ['CORTEX_API_KEY']})
def _get_rule(title):
response = cli(["scorecards", "get", "-s", "cli-test-scorecard"])
rule_id = [rule['identifier'] for rule in response['scorecard']['rules'] if rule['title'] == title]
return rule_id[0]
def test_scorecards():
- # Retry scorecard create in case there's an active evaluation
- # (can happen if test_import.py just triggered an evaluation)
- max_retries = 3
- for attempt in range(max_retries):
- try:
- cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"])
- break
- except Exception as e:
- if "500" in str(e) and attempt < max_retries - 1:
- time.sleep(2 ** attempt) # Exponential backoff: 1s, 2s
- continue
- raise
+ cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"])
response = cli(["scorecards", "list"])
assert any(scorecard['tag'] == 'cli-test-scorecard' for scorecard in response['scorecards']), "Should find scorecard with tag cli-test-scorecard"
@@ -39,33 +27,30 @@ def test_scorecards():
# cannot rely on a scorecard evaluation being complete, so not performing any validation
cli(["scorecards", "next-steps", "-s", "cli-test-scorecard", "-t", "cli-test-service"])
- # Test trigger-evaluation command (accepts both success and 409 Already evaluating)
- response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT)
- assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \
- "Should receive success message or 409 Already evaluating error"
-
# cannot rely on a scorecard evaluation being complete, so not performing any validation
#response = cli(["scorecards", "scores", "-s", "cli-test-scorecard", "-t", "cli-test-service"])
#assert response['scorecardTag'] == "cli-test-scorecard", "Should get valid response that include cli-test-scorecard"
-
+
# # Not sure if we can run this cli right away. Newly-created Scorecard might not be evaluated yet.
# # 2024-05-06, additionally now blocked by CET-8882
# # cli(["scorecards", "scores", "-t", "cli-test-scorecard", "-e", "cli-test-service"])
#
# cli(["scorecards", "scores", "-t", "cli-test-scorecard"])
-
+
+def test_scorecard_trigger_evaluation():
+ # Create a dedicated scorecard for trigger-evaluation testing to avoid conflicts with import
+ cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-evaluation-scorecard.yaml"])
+
+ # Test trigger-evaluation command (accepts both success and 409 Already evaluating)
+ response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-evaluation-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT)
+ assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \
+ "Should receive success message or 409 Already evaluating error"
+
+ # Clean up
+ cli(["scorecards", "delete", "-s", "cli-test-evaluation-scorecard"])
+
def test_scorecards_drafts():
- # Retry scorecard create in case there's an active evaluation
- max_retries = 3
- for attempt in range(max_retries):
- try:
- cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"])
- break
- except Exception as e:
- if "500" in str(e) and attempt < max_retries - 1:
- time.sleep(2 ** attempt) # Exponential backoff: 1s, 2s
- continue
- raise
+ cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"])
response = cli(["scorecards", "list", "-s"])
assert any(scorecard['tag'] == 'cli-test-draft-scorecard' for scorecard in response['scorecards'])
@@ -80,7 +65,10 @@ def test_scorecards_drafts():
# testing assumes no tenanted data, so this condition needs to be created as part of the test
#
# - there is no public API to force evaluation of a scorecard; can look into possibility of using
-# an internal endpoint for this
+# an internal endpoint for this
+#
+# Nov 2025 - there is now a public API to force evaluation of a scorecard for an entity, but there
+# is no way to determine when the evaluation completes.
#
# - could create a scorecard as part of the test and wait for it to complete, but completion time for
# evaluating a scorecard is non-deterministic and, as experienced with query API tests, completion
@@ -96,6 +84,7 @@ def test_scorecards_drafts():
# So this is how we'll roll for now . . .
# - Automated tests currently run in known tenants that have the 'cli-test-scorecard' in an evaluated state.
# - So we can semi-reliably count on an evaluated scorecard to exist.
+# - However, we should be cleaning up test data after tests run, which would invalidate these assumptions.
@pytest.fixture(scope='session')
@mock.patch.dict(os.environ, {"CORTEX_API_KEY": os.environ['CORTEX_API_KEY_VIEWER']})
diff --git a/tests/test_secrets.py b/tests/test_secrets.py
new file mode 100644
index 0000000..6145c21
--- /dev/null
+++ b/tests/test_secrets.py
@@ -0,0 +1,42 @@
+from tests.helpers.utils import *
+import pytest
+
+def test():
+ # Skip test if API key doesn't have secrets permissions
+ # The Secrets API may require special permissions or may not be available in all environments
+ try:
+ # Try to list secrets first to check if we have permission
+ response = cli(["secrets", "list"], return_type=ReturnType.RAW)
+ if response.exit_code != 0 and "403" in response.stdout:
+ pytest.skip("API key does not have permission to access Secrets API")
+ except Exception as e:
+ if "403" in str(e) or "Forbidden" in str(e):
+ pytest.skip("API key does not have permission to access Secrets API")
+
+ # Create a secret
+ response = cli(["secrets", "create", "-f", "data/run-time/secret-create.json"])
+ assert response['tag'] == 'cli_test_secret', "Should create secret with tag cli_test_secret"
+ assert response['name'] == 'CLI Test Secret', "Should have correct name"
+
+ # List secrets and verify it exists
+ response = cli(["secrets", "list"])
+ assert any(secret['tag'] == 'cli_test_secret' for secret in response['secrets']), "Should find secret with tag cli_test_secret"
+
+ # Get the secret
+ response = cli(["secrets", "get", "-t", "cli_test_secret"])
+ assert response['tag'] == 'cli_test_secret', "Should get secret with correct tag"
+ assert response['name'] == 'CLI Test Secret', "Should have correct name"
+
+ # Update the secret
+ cli(["secrets", "update", "-t", "cli_test_secret", "-f", "data/run-time/secret-update.json"])
+
+ # Verify the update
+ response = cli(["secrets", "get", "-t", "cli_test_secret"])
+ assert response['name'] == 'Updated CLI Test Secret', "Should have updated name"
+
+ # Delete the secret
+ cli(["secrets", "delete", "-t", "cli_test_secret"])
+
+ # Verify deletion by checking list
+ response = cli(["secrets", "list"])
+ assert not any(secret['tag'] == 'cli_test_secret' for secret in response['secrets']), "Should not find deleted secret"