diff --git a/tests/test_test_run_execution.py b/tests/test_test_run_execution.py
index 13afe6e..9caaf3e 100644
--- a/tests/test_test_run_execution.py
+++ b/tests/test_test_run_execution.py
@@ -70,8 +70,7 @@ def test_test_run_execution_success_all(
         assert "Test Run 1" in result.output
         assert "Test Run 2" in result.output
         assert "PASSED" in result.output
-        assert "FAILED" in result.output
-        api.assert_called_once_with(skip=None, limit=None)
+        api.assert_called_once_with(skip=None, limit=None, sort_order="desc", project_id=None)
         mock_api_client.close.assert_called_once()

     def test_test_run_execution_success_specific_id(
@@ -125,7 +124,7 @@ def test_test_run_execution_success_with_pagination(
         # Assert
         assert result.exit_code == 0
         assert "Test Run 3" in result.output
-        api.assert_called_once_with(skip=10, limit=5)
+        api.assert_called_once_with(skip=10, limit=5, sort_order="desc", project_id=None)

     def test_test_run_execution_success_json_output(
         self,
@@ -247,6 +246,7 @@ def test_test_run_execution_help_message(self, cli_runner: CliRunner) -> None:
         assert "--id" in result.output
         assert "--skip" in result.output
         assert "--limit" in result.output
+        assert "--project-id" in result.output
         assert "--json" in result.output

     @pytest.mark.parametrize("state,expected_display", [
@@ -361,7 +361,7 @@ def test_test_run_execution_pagination_parameters(

         # Assert
         assert result.exit_code == 0
-        api.assert_called_once_with(skip=skip, limit=limit)
+        api.assert_called_once_with(skip=skip, limit=limit, sort_order="desc", project_id=None)

     def test_test_run_execution_error_display(
         self,
@@ -783,3 +783,317 @@ def test_test_run_execution_log_generic_exception(
         # Assert
         assert result.exit_code == 1
         assert "Network timeout" in str(result.exception)
+
+    def test_test_run_execution_sort_parameter_asc(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution with sort parameter set to asc."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=1,
+                title="Old Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            ),
+            api_models.TestRunExecution(
+                id=2,
+                title="New Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["--sort", "asc"])
+
+                # Assert
+                assert result.exit_code == 0
+                assert "Old Test Run" in result.output
+                api.assert_called_once_with(skip=None, limit=None, sort_order="asc", project_id=None)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_sort_parameter_desc_default(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution with sort parameter default (desc)."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=2,
+                title="New Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            ),
+            api_models.TestRunExecution(
+                id=1,
+                title="Old Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act - don't specify sort parameter, should default to desc
+                result = cli_runner.invoke(test_run_execution)
+
+                # Assert
+                assert result.exit_code == 0
+                assert "New Test Run" in result.output
+                assert "Old Test Run" in result.output
+                api.assert_called_once_with(skip=None, limit=None, sort_order="desc", project_id=None)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_sort_parameter_explicit_desc(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution with sort parameter explicitly set to desc."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=2,
+                title="New Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            ),
+            api_models.TestRunExecution(
+                id=1,
+                title="Old Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["--sort", "desc"])
+
+                # Assert
+                assert result.exit_code == 0
+                assert "New Test Run" in result.output
+                assert "Old Test Run" in result.output
+                api.assert_called_once_with(skip=None, limit=None, sort_order="desc", project_id=None)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_all_flag(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution with --all flag."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=1,
+                title="Test Run 1",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=1
+            ),
+            api_models.TestRunExecution(
+                id=2,
+                title="Test Run 2",
+                state=api_models.TestStateEnum.FAILED,
+                project_id=1,
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["--all"])
+
+                # Assert
+                assert result.exit_code == 0
+                # When --all is used, limit should be set to 0
+                api.assert_called_once_with(skip=None, limit=0, sort_order="desc", project_id=None)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_all_with_limit_fails(
+        self,
+        cli_runner: CliRunner,
+    ) -> None:
+        """Test that --all and --limit cannot be used together."""
+        # Act
+        result = cli_runner.invoke(test_run_execution, ["--all", "--limit", "50"])
+
+        # Assert
+        assert result.exit_code != 0
+        assert "--all and --limit cannot be used together" in result.output
+
+    def test_test_run_execution_all_with_log_fails(
+        self,
+        cli_runner: CliRunner,
+    ) -> None:
+        """Test that --all and --log cannot be used together."""
+        # Act
+        result = cli_runner.invoke(test_run_execution, ["--all", "--log", "--id", "123"])
+
+        # Assert
+        assert result.exit_code != 0
+        assert "--all option is not applicable when fetching logs" in result.output
+
+    def test_test_run_execution_help_shows_all_option(self, cli_runner: CliRunner) -> None:
+        """Test that the help message includes the --all option."""
+        # Act
+        result = cli_runner.invoke(test_run_execution, ["--help"])
+
+        # Assert
+        assert result.exit_code == 0
+        assert "--all" in result.output
+        assert "pagination" in result.output
+        assert "(cannot be used with --limit)" in result.output
+
+    def test_test_run_execution_with_project_id(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution history filtered by project ID."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=1,
+                title="Project 5 Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=5
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["--project-id", "5"])
+
+                # Assert
+                assert result.exit_code == 0
+                assert "Project 5 Test Run" in result.output
+                api.assert_called_once_with(skip=None, limit=None, sort_order="desc", project_id=5)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_with_project_id_short_form(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution history filtered by project ID using short form."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=1,
+                title="Project 10 Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=10
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["-p", "10"])
+
+                # Assert
+                assert result.exit_code == 0
+                assert "Project 10 Test Run" in result.output
+                api.assert_called_once_with(skip=None, limit=None, sort_order="desc", project_id=10)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_with_project_id_and_pagination(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution with project ID combined with pagination."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=3,
+                title="Filtered Paginated Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=7
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["--project-id", "7", "--skip", "5", "--limit", "10"])
+
+                # Assert
+                assert result.exit_code == 0
+                assert "Filtered Paginated Test Run" in result.output
+                api.assert_called_once_with(skip=5, limit=10, sort_order="desc", project_id=7)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_with_project_id_and_sort(
+        self,
+        cli_runner: CliRunner,
+        mock_sync_apis: Mock,
+        mock_api_client: Mock
+    ) -> None:
+        """Test test run execution with project ID combined with sort order."""
+        # Arrange
+        test_executions = [
+            api_models.TestRunExecution(
+                id=1,
+                title="Old Test Run",
+                state=api_models.TestStateEnum.PASSED,
+                project_id=3
+            )
+        ]
+        api = mock_sync_apis.test_run_executions_api.read_test_run_executions_api_v1_test_run_executions_get
+        api.return_value = test_executions
+
+        with patch("th_cli.commands.test_run_execution.get_client", return_value=mock_api_client):
+            with patch("th_cli.commands.test_run_execution.SyncApis", return_value=mock_sync_apis):
+                # Act
+                result = cli_runner.invoke(test_run_execution, ["--project-id", "3", "--sort", "asc"])
+
+                # Assert
+                assert result.exit_code == 0
+                api.assert_called_once_with(skip=None, limit=None, sort_order="asc", project_id=3)
+                mock_api_client.close.assert_called_once()
+
+    def test_test_run_execution_project_id_with_log_fails(
+        self,
+        cli_runner: CliRunner,
+    ) -> None:
+        """Test that --project-id cannot be used with --log."""
+        # Act
+        result = cli_runner.invoke(test_run_execution, ["--id", "123", "--project-id", "5", "--log"])
+
+        # Assert
+        assert result.exit_code != 0
+        assert "--project-id" in result.output
+        assert "not applicable" in result.output or "Error" in result.output
diff --git a/th_cli/api_lib_autogen/api/test_run_executions_api.py b/th_cli/api_lib_autogen/api/test_run_executions_api.py
index b74b67d..453d7b2 100644
--- a/th_cli/api_lib_autogen/api/test_run_executions_api.py
+++ b/th_cli/api_lib_autogen/api/test_run_executions_api.py
@@ -84,7 +84,11 @@ def _build_for_download_log_api_v1_test_run_executions_id_log_get(
         self, id: int, json_entries: Optional[bool] = None, download: Optional[bool] = None
     ) -> Awaitable[None]:
         """
-        Download the logs from a test run. Args: id (int): Id of the TestRunExectution the log is requested for json_entries (bool, optional): When set, return each log line as a json object download (bool, optional): When set, return as attachment
+        Download the logs from a test run.
+        Args:
+            id (int): Id of the TestRunExecution the log is requested for
+            json_entries (bool, optional): When set, return each log line as a json object
+            download (bool, optional): When set, return as attachment
         """
         path_params = {"id": str(id)}

@@ -106,7 +110,8 @@ def _build_for_get_test_runner_status_api_v1_test_run_executions_status_get(
         self,
     ) -> Awaitable[m.TestRunnerStatus]:
         """
-        Retrieve status of the Test Engine. When the Test Engine is actively running the status will include the current test_run and the details of the states.
+        Retrieve status of the Test Engine.
+        When the Test Engine is actively running the status will include the current test_run and the details of the states.
         """
         return self.api_client.request(
             type_=m.TestRunnerStatus,
@@ -148,9 +153,18 @@ def _build_for_read_test_run_executions_api_v1_test_run_executions_get(
         search_query: Optional[str] = None,
         skip: Optional[int] = None,
         limit: Optional[int] = None,
+        sort_order: Optional[str] = None,
     ) -> Awaitable[List[m.TestRunExecutionWithStats]]:
         """
-        Retrieve test runs, including statistics. Args: project_id: Filter test runs by project. archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned. skip: Pagination offset. limit: Max number of records to return. Returns: List of test runs with execution statistics.
+        Retrieve test runs, including statistics.
+        Args:
+            project_id: Filter test runs by project.
+            archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned.
+            skip: Pagination offset.
+            limit: Max number of records to return.
+            sort_order: Sort order for results. Either "asc" or "desc". Defaults to "asc". Results are sorted by ID.
+        Returns:
+            List of test runs with execution statistics.
         """
         query_params = {}
         if project_id is not None:
@@ -163,6 +177,8 @@ def _build_for_read_test_run_executions_api_v1_test_run_executions_get(
             query_params["skip"] = str(skip)
         if limit is not None:
             query_params["limit"] = str(limit)
+        if sort_order is not None:
+            query_params["sort_order"] = str(sort_order)

         return self.api_client.request(
             type_=List[m.TestRunExecutionWithStats],
@@ -205,7 +221,13 @@ def _build_for_unarchive_api_v1_test_run_executions_id_unarchive_post(
         self, id: int
     ) -> Awaitable[m.TestRunExecution]:
         """
-        Unarchive test run execution by id. Args: id (int): test run execution id Raises: HTTPException: if no test run execution exists for provided id Returns: TestRunExecution: test run execution record that was unarchived
+        Unarchive test run execution by id.
+        Args:
+            id (int): test run execution id
+        Raises:
+            HTTPException: if no test run execution exists for provided id
+        Returns:
+            TestRunExecution: test run execution record that was unarchived
         """
         path_params = {"id": str(id)}

@@ -307,12 +329,26 @@ async def read_test_run_executions_api_v1_test_run_executions_get(
         search_query: Optional[str] = None,
         skip: Optional[int] = None,
         limit: Optional[int] = None,
+        sort_order: Optional[str] = None,
     ) -> List[m.TestRunExecutionWithStats]:
         """
-        Retrieve test runs, including statistics. Args: project_id: Filter test runs by project. archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned. skip: Pagination offset. limit: Max number of records to return. Returns: List of test runs with execution statistics.
+        Retrieve test runs, including statistics.
+        Args:
+            project_id: Filter test runs by project.
+            archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned.
+            skip: Pagination offset.
+            limit: Max number of records to return.
+            sort_order: Sort order for results. Either "asc" or "desc". Defaults to "asc". Results are sorted by ID.
+        Returns:
+            List of test runs with execution statistics.
         """
         return await self._build_for_read_test_run_executions_api_v1_test_run_executions_get(
-            project_id=project_id, archived=archived, search_query=search_query, skip=skip, limit=limit
+            project_id=project_id,
+            archived=archived,
+            search_query=search_query,
+            skip=skip,
+            limit=limit,
+            sort_order=sort_order,
         )

     async def remove_test_run_execution_api_v1_test_run_executions_id_delete(
@@ -356,7 +392,13 @@ def abort_testing_api_v1_test_run_executions_abort_testing_post(

     def archive_api_v1_test_run_executions_id_archive_post(self, id: int) -> m.TestRunExecution:
         """
-        Archive test run execution by id. Args: id (int): test run execution id Raises: HTTPException: if no test run execution exists for provided id Returns: TestRunExecution: test run execution record that was archived
+        Archive test run execution by id.
+        Args:
+            id (int): test run execution id
+        Raises:
+            HTTPException: if no test run execution exists for provided id
+        Returns:
+            TestRunExecution: test run execution record that was archived
         """
         coroutine = self._build_for_archive_api_v1_test_run_executions_id_archive_post(id=id)
         return get_event_loop().run_until_complete(coroutine)
@@ -389,7 +431,11 @@ def download_log_api_v1_test_run_executions_id_log_get(
         self, id: int, json_entries: Optional[bool] = None, download: Optional[bool] = None
     ) -> None:
         """
-        Download the logs from a test run. Args: id (int): Id of the TestRunExectution the log is requested for json_entries (bool, optional): When set, return each log line as a json object download (bool, optional): When set, return as attachment
+        Download the logs from a test run.
+        Args:
+            id (int): Id of the TestRunExecution the log is requested for
+            json_entries (bool, optional): When set, return each log line as a json object
+            download (bool, optional): When set, return as attachment
         """
         coroutine = self._build_for_download_log_api_v1_test_run_executions_id_log_get(
             id=id, json_entries=json_entries, download=download
@@ -400,7 +446,8 @@ def get_test_runner_status_api_v1_test_run_executions_status_get(
         self,
     ) -> m.TestRunnerStatus:
         """
-        Retrieve status of the Test Engine. When the Test Engine is actively running the status will include the current test_run and the details of the states.
+        Retrieve status of the Test Engine.
+        When the Test Engine is actively running the status will include the current test_run and the details of the states.
         """
         coroutine = self._build_for_get_test_runner_status_api_v1_test_run_executions_status_get()
         return get_event_loop().run_until_complete(coroutine)
@@ -419,12 +466,26 @@ def read_test_run_executions_api_v1_test_run_executions_get(
         search_query: Optional[str] = None,
         skip: Optional[int] = None,
         limit: Optional[int] = None,
+        sort_order: Optional[str] = None,
     ) -> List[m.TestRunExecutionWithStats]:
         """
-        Retrieve test runs, including statistics. Args: project_id: Filter test runs by project. archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned. skip: Pagination offset. limit: Max number of records to return. Returns: List of test runs with execution statistics.
+        Retrieve test runs, including statistics.
+        Args:
+            project_id: Filter test runs by project.
+            archived: Get archived test runs, when true will return archived test runs only, when false only non-archived test runs are returned.
+            skip: Pagination offset.
+            limit: Max number of records to return.
+            sort_order: Sort order for results. Either "asc" or "desc". Defaults to "asc". Results are sorted by ID.
+        Returns:
+            List of test runs with execution statistics.
         """
         coroutine = self._build_for_read_test_run_executions_api_v1_test_run_executions_get(
-            project_id=project_id, archived=archived, search_query=search_query, skip=skip, limit=limit
+            project_id=project_id,
+            archived=archived,
+            search_query=search_query,
+            skip=skip,
+            limit=limit,
+            sort_order=sort_order,
         )
         return get_event_loop().run_until_complete(coroutine)
@@ -446,14 +507,22 @@ def start_test_run_execution_api_v1_test_run_executions_id_start_post(

     def unarchive_api_v1_test_run_executions_id_unarchive_post(self, id: int) -> m.TestRunExecution:
         """
-        Unarchive test run execution by id. Args: id (int): test run execution id Raises: HTTPException: if no test run execution exists for provided id Returns: TestRunExecution: test run execution record that was unarchived
+        Unarchive test run execution by id.
+        Args:
+            id (int): test run execution id
+        Raises:
+            HTTPException: if no test run execution exists for provided id
+        Returns:
+            TestRunExecution: test run execution record that was unarchived
         """
         coroutine = self._build_for_unarchive_api_v1_test_run_executions_id_unarchive_post(id=id)
         return get_event_loop().run_until_complete(coroutine)

     def upload_file_api_v1_test_run_executions_file_upload_post(self, file: IO[Any]) -> m.Any:
         """
-        Upload a file to the specified path of the current test run. Args: file: The file to upload.
+        Upload a file to the specified path of the current test run.
+        Args:
+            file: The file to upload.
         """
         coroutine = self._build_for_upload_file_api_v1_test_run_executions_file_upload_post(file=file)
         return get_event_loop().run_until_complete(coroutine)
diff --git a/th_cli/commands/run_tests.py b/th_cli/commands/run_tests.py
index d540398..1a3efb7 100644
--- a/th_cli/commands/run_tests.py
+++ b/th_cli/commands/run_tests.py
@@ -170,9 +170,7 @@ async def run_tests(
     await client.aclose()


-async def __project_config(
-    async_apis: AsyncApis, project_id: int | None = None
-) -> m.TestEnvironmentConfig:
+async def __project_config(async_apis: AsyncApis, project_id: int | None = None) -> m.TestEnvironmentConfig:
    """Retrieve project configuration for given project ID or default configuration if none provided."""
    projects_api = async_apis.projects_api

diff --git a/th_cli/commands/test_run_execution.py b/th_cli/commands/test_run_execution.py
index e40bdf9..2a87b13 100644
--- a/th_cli/commands/test_run_execution.py
+++ b/th_cli/commands/test_run_execution.py
@@ -24,8 +24,8 @@
 from th_cli.exceptions import CLIError, handle_api_error
 from th_cli.utils import __print_json

-table_format_header = "{:<5} {:<55} {:<30}"
-table_format = "{:<5} {:<55} {:<30}"
+table_format_header = "{:<6} {:<55} {}"
+table_format = "{:<6} {} {}"


 @click.command(
@@ -56,7 +56,22 @@
     default=None,
     required=False,
     type=int,
-    help=colorize_help("Maximum number of test runs to fetch"),
+    help=colorize_help("Maximum number of test runs to fetch (default: 100)"),
+)
+@click.option(
+    "--sort",
+    default="desc",
+    required=False,
+    type=click.Choice(["asc", "desc"], case_sensitive=False),
+    help=colorize_help("Sort order for test runs by ID. 'desc' shows highest ID first, 'asc' shows lowest ID first"),
+)
+@click.option(
+    "--project-id",
+    "-p",
+    default=None,
+    required=False,
+    type=int,
+    help=colorize_help("Filter test runs by project ID"),
 )
 @click.option(
     "--log",
@@ -70,12 +85,29 @@
     default=False,
     help=colorize_help("Print JSON response for more details (not applicable with --log)"),
 )
-def test_run_execution(id: int | None, skip: int | None, limit: int | None, log: bool, json: bool) -> None:
+@click.option(
+    "--all",
+    is_flag=True,
+    default=False,
+    help=colorize_help("Fetch all test run executions with screen pagination (cannot be used with --limit)"),
+)
+def test_run_execution(
+    id: int | None,
+    skip: int | None,
+    limit: int | None,
+    sort: str,
+    project_id: int | None,
+    log: bool,
+    json: bool,
+    all: bool,
+) -> None:
     """Manage test run executions - list history or fetch logs"""
     # Validate options
-    if log and (skip is not None or limit is not None):
-        raise click.ClickException("--skip and --limit options are not applicable when fetching logs (--log)")
+    if log and (skip is not None or limit is not None or project_id is not None):
+        raise click.ClickException(
+            "--skip, --limit, and --project-id options are not applicable when fetching logs (--log)"
+        )

     if log and id is None:
         raise click.ClickException("--log requires --id to specify which test run execution to fetch logs for")
@@ -83,6 +115,15 @@ def test_run_execution(id: int | None, skip: int | None, limit: int | None, log:
     if log and json:
         raise click.ClickException("--json option is not applicable when fetching logs (--log)")

+    if log and sort != "desc":
+        raise click.ClickException("--sort option is not applicable when fetching logs (--log)")
+
+    if all and limit is not None:
+        raise click.ClickException("--all and --limit cannot be used together")
+
+    if log and all:
+        raise click.ClickException("--all option is not applicable when fetching logs (--log)")
+
     try:
         with closing(get_client()) as client:
             sync_apis = SyncApis(client)
@@ -92,7 +133,7 @@ def test_run_execution(id: int | None, skip: int | None, limit: int | None, log:
         elif id is not None:
             __test_run_execution_by_id(sync_apis, id, json)
         else:
-            __test_run_execution_batch(sync_apis, json, skip, limit)
+            __test_run_execution_batch(sync_apis, json, skip, limit, sort, all, project_id)

     except CLIError:
         raise  # Re-raise CLI Errors as-is
@@ -110,18 +151,108 @@ def __test_run_execution_by_id(sync_apis: SyncApis, id: int, json: bool) -> None:
         handle_api_error(e, "get test run execution")


+def __print_filters_info(
+    skip: int | None, limit: int | None, sort_order: str, show_all: bool = False, project_id: int | None = None
+) -> str:
+    """Generate comprehensive filter and pagination information text."""
+    filters = []
+
+    # Project filter
+    if project_id is not None:
+        filters.append(f"Project ID: {project_id}")
+
+    # Order information (more descriptive than just "Sort: DESC")
+    if sort_order == "desc":
+        filters.append("Order: newest first")
+    else:
+        filters.append("Order: oldest first")
+
+    # Pagination info
+    if show_all:
+        filters.append("Results: ALL RECORDS")
+    else:
+        # Skip info
+        if skip is not None:
+            filters.append(f"Skip: {skip}")
+        else:
+            filters.append("Skip: 0 (from start)")
+
+        # Limit info
+        if limit is not None:
+            filters.append(f"Limit: {limit}")
+        else:
+            filters.append("Limit: 100 (default)")
+
+    return f"🔍 Active Filters: {' • '.join(filters)}"
+
+
 def __test_run_execution_batch(
-    sync_apis: SyncApis, json: bool | None, skip: int | None = None, limit: int | None = None
+    sync_apis: SyncApis,
+    json: bool | None,
+    skip: int | None = None,
+    limit: int | None = None,
+    sort_order: str = "desc",
+    show_all: bool = False,
+    project_id: int | None = None,
 ) -> None:
     try:
         test_run_execution_api = sync_apis.test_run_executions_api
+
+        # When --all is used, set limit to 0 to get all results
+        effective_limit = 0 if show_all else limit
+
         test_run_executions = test_run_execution_api.read_test_run_executions_api_v1_test_run_executions_get(
-            skip=skip, limit=limit
+            skip=skip, limit=effective_limit, sort_order=sort_order, project_id=project_id
         )
+
         if json:
             __print_json(test_run_executions)
         else:
-            __print_table_test_executions(test_run_executions)
+            if show_all:
+                # Use click's pager for --all option (like git log)
+                output_lines = []
+                output_lines.append(
+                    click.style(
+                        __print_filters_info(skip, limit, sort_order, show_all, project_id), fg="cyan", bold=True
+                    )
+                )
+                output_lines.append("")  # Empty line
+
+                # Add header
+                output_lines.append(colorize_header(table_format_header.format("ID", "Title", "State")))
+
+                # Add all test executions
+                if isinstance(test_run_executions, list):
+                    for item in test_run_executions:
+                        # Get raw values to calculate proper padding
+                        title_value = item.title
+
+                        # Apply styling
+                        styled_title = italic(title_value)
+
+                        # Calculate padding needed for title (55 chars total)
+                        title_padding = max(0, 55 - len(title_value))
+
+                        output_lines.append(
+                            table_format.format(
+                                item.id,
+                                styled_title,
+                                " " * title_padding,
+                            )
+                            + colorize_state((item.state).value)
+                        )
+
+                # Use pager to display all content
+                click.echo_via_pager("\n".join(output_lines))
+            else:
+                # Regular output with filter info
+                click.echo(
+                    click.style(
+                        __print_filters_info(skip, limit, sort_order, show_all, project_id), fg="cyan", bold=True
+                    )
+                )
+                click.echo()  # Add empty line for readability
+                __print_table_test_executions(test_run_executions)
     except UnexpectedResponse as e:
         handle_api_error(e, "get test run executions")
@@ -151,12 +282,23 @@ def __print_table_test_executions(test_execution: list) -> None:

 def __print_table_test_execution(item: dict, print_header=True) -> None:
     print_header and __print_table_header()
+
+    # Get raw values to calculate proper padding
+    title_value = item.get("title")
+
+    # Apply styling
+    styled_title = italic(title_value)
+
+    # Calculate padding needed for title (55 chars total)
+    title_padding = max(0, 55 - len(title_value))
+
     click.echo(
         table_format.format(
             item.get("id"),
-            italic(item.get("title")),
-            colorize_state((item.get("state")).value),
+            styled_title,
+            " " * title_padding,
         )
+        + colorize_state((item.get("state")).value)
     )