From aeb8987e712de4ddac089cc86d8a5c11d6d6ba93 Mon Sep 17 00:00:00 2001
From: dan-mm
Date: Thu, 11 Jan 2024 16:50:16 +0100
Subject: [PATCH 01/27] initial commit for parallelization; introduced
 parallel_id to Runner arguments; updated tests to account for parallel_id in
 container names

---
 requirements-dev.txt         |   1 +
 runner.py                    |  37 +++++---
 tests/test_functions.py      |   5 +-
 tests/test_usage_scenario.py | 177 +++++++++++++++++++----------------
 tests/test_volume_loading.py |  61 +++++++-----
 5 files changed, 160 insertions(+), 121 deletions(-)

diff --git a/requirements-dev.txt b/requirements-dev.txt
index e5322c310..0d3263474 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -6,6 +6,7 @@ pylint==3.0.3
 fastapi==0.108.0
 starlette>=0.32
 anybadge==1.14.0
+pytest-xdist==3.5.0

 # just to clear the pylint errors for the files in /api
 scipy==1.11.4

diff --git a/runner.py b/runner.py
index cb934ccd3..3b81e61ac 100755
--- a/runner.py
+++ b/runner.py
@@ -89,11 +89,15 @@ def __init__(self,
         name, uri, uri_type, filename='usage_scenario.yml', branch=None,
         debug_mode=False, allow_unsafe=False, no_file_cleanup=False, skip_system_checks=False,
         skip_unsafe=False, verbose_provider_boot=False, full_docker_prune=False,
-        dev_no_sleeps=False, dev_no_build=False, dev_no_metrics=False, docker_prune=False, job_id=None):
+        dev_no_sleeps=False, dev_no_build=False, dev_no_metrics=False, docker_prune=False, job_id=None,
+        parallel_id=None):

         if skip_unsafe is True and allow_unsafe is True:
             raise RuntimeError('Cannot specify both --skip-unsafe and --allow-unsafe')

+        if parallel_id is None:
+            parallel_id = random.randint(500000,10000000)
+
         # variables that should not change if you call run multiple times
         self._name = name
         self._debugger = DebugHelper(debug_mode)
@@ -111,7 +115,8 @@ def __init__(self,
         self._uri_type = uri_type
         self._original_filename = filename
         self._branch = branch
-        self._tmp_folder = '/tmp/green-metrics-tool'
+        #DMM:MARK
+        self._tmp_folder = f"/tmp/green-metrics-tool/{parallel_id}"
         self._usage_scenario = {}
         self._architecture = utils.get_architecture()
         self._sci = {'R_d': None, 'R': 0}
@@ -121,6 +126,7 @@ def __init__(self,
         self._run_id = None
         self._commit_hash = None
         self._commit_timestamp = None
+        self._parallel_id = parallel_id

         del self._arguments['self'] # self is not needed and also cannot be serialized. We remove it
@@ -358,6 +364,7 @@ def check_running_containers(self):
             check=True, encoding='UTF-8')
         for line in result.stdout.splitlines():
             for running_container in line.split(','): # if docker container has multiple tags, they will be split by comma, so we only want to
+                #DMM:MARK
                 for service_name in self._usage_scenario.get('services', {}):
                     if 'container_name' in self._usage_scenario['services'][service_name]:
                         container_name = self._usage_scenario['services'][service_name]['container_name']
@@ -668,12 +675,12 @@ def setup_services(self):
        # Check if there are service dependencies defined with 'depends_on'.
        # If so, change the order of the services accordingly.
        services_ordered = self.order_services(services)
+        #DMM:MARK
        for service_name, service in services_ordered.items():
            if 'container_name' in service:
-                container_name = service['container_name']
+                container_name = f"{service['container_name']}_{self._parallel_id}"
            else:
-                container_name = service_name
+                container_name = f"{service_name}_{self._parallel_id}"

            print(TerminalColors.HEADER, '\nSetting up container: ', container_name, TerminalColors.ENDC)

@@ -815,7 +822,6 @@ def setup_services(self):
                docker_run_string.append('--net')
                docker_run_string.append(self.__networks[0])

-
            if 'pause-after-phase' in service:
                self.__services_to_pause_phase[service['pause-after-phase']] = self.__services_to_pause_phase.get(service['pause-after-phase'], []) + [container_name]

@@ -856,20 +862,21 @@ def setup_services(self):
            # In the future we want to implement a health check to know if dependent containers are actually ready.
            if 'depends_on' in service:
                for dependent_container in service['depends_on']:
-                    print(f"Waiting for dependent container {dependent_container}")
+                    dependent_container_name = f"{dependent_container}_{self._parallel_id}"
+                    print(f"Waiting for dependent container {dependent_container_name}")
                    time_waited = 0
                    state = ''
                    health = 'healthy' # default because some containers have no health
                    max_waiting_time = config['measurement']['boot']['wait_time_dependencies']
                    while time_waited < max_waiting_time:
                        status_output = subprocess.check_output(
-                            ["docker", "container", "inspect", "-f", "{{.State.Status}}", dependent_container],
+                            ["docker", "container", "inspect", "-f", "{{.State.Status}}", dependent_container_name],
                            stderr=subprocess.STDOUT,
                            encoding='UTF-8',
                        )
                        state = status_output.strip()
-                        print(f"State of container '{dependent_container}': {state}")
+                        print(f"State of container '{dependent_container_name}': {state}")

                        if isinstance(service['depends_on'], dict) \
                                and 'condition' in service['depends_on'][dependent_container]:
@@ -877,7 +884,7 @@ def setup_services(self):
                            condition = service['depends_on'][dependent_container]['condition']
                            if condition == 'service_healthy':
                                ps = subprocess.run(
-                                    ["docker", "container", "inspect", "-f", "{{.State.Health.Status}}", dependent_container],
+                                    ["docker", "container", "inspect", "-f", "{{.State.Health.Status}}", dependent_container_name],
                                    check=False,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, # put both in one stream
                                )
                                health = ps.stdout.strip()
                                if ps.returncode != 0 or health == '':
-                                    raise RuntimeError(f"Health check for dependent_container '{dependent_container}' was requested, but container has no healthcheck implemented! (Output was: {health})")
+                                    raise RuntimeError(f"Health check for dependent_container '{dependent_container_name}' was requested, but container has no healthcheck implemented! (Output was: {health})")
                                if health == 'unhealthy':
                                    raise RuntimeError('Container healthcheck failed terminally with status "unhealthy"')
-                                print(f"Health of container '{dependent_container}': {health}")
+                                print(f"Health of container '{dependent_container_name}': {health}")
                            elif condition == 'service_started':
                                pass
                            else:

                        time_waited += 1

                    if state != 'running':
-                        raise RuntimeError(f"Dependent container '{dependent_container}' of '{container_name}' is not running but {state} after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.")
+                        raise RuntimeError(f"Dependent container '{dependent_container_name}' of '{container_name}' is not running but {state} after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.")
                    if health != 'healthy':
-                        raise RuntimeError(f"Dependent container '{dependent_container}' of '{container_name}' is not healthy but '{health}' after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.")
+                        raise RuntimeError(f"Dependent container '{dependent_container_name}' of '{container_name}' is not healthy but '{health}' after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.")

            if 'command' in service:  # must come last
                for cmd in service['command'].split():
@@ -1063,6 +1070,8 @@ def run_flows(self):
            self.start_phase(el['name'].replace('[', '').replace(']',''), transition=False)

+            #DMM:MARK ['container']
+            el['container'] = f"{el['container']}_{self._parallel_id}"
            for inner_el in el['commands']:
                if 'note' in inner_el:
                    self.__notes_helper.add_note({'note': inner_el['note'], 'detail_name': el['container'], 'timestamp': int(time.time_ns() / 1_000)})

diff --git a/tests/test_functions.py b/tests/test_functions.py
index 8ce135d4b..98200fa38 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -36,7 +36,7 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filename):
 def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder',
        branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False,
        skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=False, skip_system_checks=True,
-        dev_no_sleeps=True, dev_no_metrics=True):
+        dev_no_sleeps=True, dev_no_metrics=True, parallel_id=None):
     usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario)
     if docker_compose is not None:
         docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose)
@@ -54,7 +54,8 @@ def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder',
     return Runner(name=RUN_NAME, uri=uri, uri_type=uri_type, filename=usage_scenario,
        branch=branch, debug_mode=debug_mode, allow_unsafe=allow_unsafe, no_file_cleanup=no_file_cleanup,
        skip_unsafe=skip_unsafe, verbose_provider_boot=verbose_provider_boot, dev_no_build=dev_no_build,
-        skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics)
+        skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics,
+        parallel_id=parallel_id)

 # This function runs the runner up to and *including* the specified step
 # remember to catch in try:finally and do cleanup when calling this!
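Editor's note on the pattern in this patch: every Docker artifact a test creates (container, network, tmp folder) is suffixed with the Runner's parallel_id, so concurrent pytest-xdist workers cannot collide on names. Below is a minimal, self-contained sketch of the same idea under stated assumptions; the helpers make_parallel_id and suffixed are hypothetical illustrations, not GMT code:

import os
import random
import string

def make_parallel_id(length=12):
    # pytest-xdist exports PYTEST_XDIST_WORKER (e.g. "gw0", "gw1") inside each
    # worker process; fall back to "main" when the suite runs without -n.
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'main')
    suffix = ''.join(random.choices(string.ascii_lowercase, k=length))
    return f"{worker}-{suffix}"

def suffixed(name, parallel_id):
    # mirrors the f"{service_name}_{self._parallel_id}" naming used in runner.py
    return f"{name}_{parallel_id}"

Combining the worker id with a random word keeps names unique both across workers and across repeated runs on the same worker.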
diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index ab9a77bea..cf08f2ed8 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -52,12 +52,12 @@ def cleanup_tmp_directories_fixture(): # environment: [object] (optional) # Key-Value pairs for ENV variables inside the container -def get_env_vars(runner): +def get_env_vars(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', 'test-container', '/bin/sh', + ['docker', 'exec', f"test-container_{parallel_id}", '/bin/sh', '-c', 'env'], check=True, stderr=subprocess.PIPE, @@ -71,8 +71,9 @@ def get_env_vars(runner): # Test allowed characters def test_env_variable_allowed_characters(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - env_var_output = get_env_vars(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + env_var_output = get_env_vars(runner, parallel_id) assert 'TESTALLOWED=alpha-num123_' in env_var_output, Tests.assertion_info('TESTALLOWED=alpha-num123_', env_var_output) assert 'TEST1_ALLOWED=alpha-key-num123_' in env_var_output, Tests.assertion_info('TEST1_ALLOWED=alpha-key-num123_', env_var_output) @@ -81,16 +82,18 @@ def test_env_variable_allowed_characters(): # Test too long values def test_env_variable_too_long(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: - get_env_vars(runner) + get_env_vars(runner, parallel_id) assert 'TEST_TOO_LONG' in str(e.value), Tests.assertion_info("Env var value is too long", str(e.value)) # Test skip_unsafe=true def test_env_variable_skip_unsafe_true(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - env_var_output = get_env_vars(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + env_var_output = get_env_vars(runner, parallel_id) # Only allowed values should be in env vars, forbidden ones should be skipped assert 'TEST_ALLOWED' in env_var_output, Tests.assertion_info('TEST_ALLOWED in env vars', env_var_output) @@ -98,8 +101,9 @@ def test_env_variable_skip_unsafe_true(): # Test allow_unsafe=true def test_env_variable_allow_unsafe_true(): - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - env_var_output = get_env_vars(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + env_var_output = get_env_vars(runner, parallel_id) # Both allowed and forbidden values should be in env vars assert 'TEST_ALLOWED' in env_var_output, 
Tests.assertion_info('TEST_ALLOWED in env vars', env_var_output) @@ -108,11 +112,11 @@ def test_env_variable_allow_unsafe_true(): # ports: [int:int] (optional) # Docker container portmapping on host OS to be used with --allow-unsafe flag. -def get_port_bindings(runner): +def get_port_bindings(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'port', 'test-container', '9018'], + ['docker', 'port', f"test-container_{parallel_id}", '9018'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -125,20 +129,22 @@ def get_port_bindings(runner): return port, err def test_port_bindings_allow_unsafe_true(): - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - port, _ = get_port_bindings(runner) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + port, _ = get_port_bindings(runner, parallel_id) assert port.startswith('0.0.0.0:9017'), Tests.assertion_info('0.0.0.0:9017', port) def test_port_bindings_skip_unsafe_true(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) # need to catch exception here as otherwise the subprocess returning an error will # fail the test with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception): - _, docker_port_err = get_port_bindings(runner) - expected_container_error = 'Error: No public port \'9018/tcp\' published for test-container\n' + _, docker_port_err = get_port_bindings(runner, parallel_id) + expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container_{parallel_id}\n" assert docker_port_err == expected_container_error, \ Tests.assertion_info(f"Container Error: {expected_container_error}", docker_port_err) expected_warning = 'Found ports entry but not running in unsafe mode. 
Skipping' @@ -146,10 +152,11 @@ def test_port_bindings_skip_unsafe_true(): Tests.assertion_info(f"Warning: {expected_warning}", 'no/different warning') def test_port_bindings_no_skip_or_allow(): - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with pytest.raises(Exception) as e: - _, docker_port_err = get_port_bindings(runner) - expected_container_error = 'Error: No public port \'9018/tcp\' published for test-container\n' + _, docker_port_err = get_port_bindings(runner, parallel_id) + expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container_{parallel_id}\n" assert docker_port_err == expected_container_error, \ Tests.assertion_info(f"Container Error: {expected_container_error}", docker_port_err) expected_error = 'Found "ports" but neither --skip-unsafe nor --allow-unsafe is set' @@ -162,14 +169,15 @@ def test_port_bindings_no_skip_or_allow(): def test_setup_commands_one_command(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - assert 'Running command: docker exec test-container sh -c ps -a' in out.getvalue(), \ + assert f"Running command: docker exec test-container_{parallel_id} sh -c ps -a" in out.getvalue(), \ Tests.assertion_info('stdout message: Running command: docker exec ps -a', out.getvalue()) assert '1 root 0:00 /bin/sh' in out.getvalue(), \ Tests.assertion_info('container stdout showing /bin/sh as process 1', 'different message in container stdout') @@ -177,7 +185,8 @@ def test_setup_commands_one_command(): def test_setup_commands_multiple_commands(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -185,17 +194,17 @@ def test_setup_commands_multiple_commands(): finally: runner.cleanup() - expected_pattern = re.compile(r'Running command: docker exec test-container echo hello world.*\ + expected_pattern = re.compile(fr"Running command: docker exec test-container_{parallel_id} echo hello world.*\ \s*Stdout: hello world.*\ \s*Stderr:.*\ -\s*Running command: docker exec test-container ps -a.*\ +\s*Running command: docker exec test-container_{parallel_id} ps -a.*\ \s*Stdout:\s+PID\s+USER\s+TIME\s+COMMAND.*\ \s*1\s+root\s+\d:\d\d\s+/bin/sh.*\ \s*1\d+\s+root\s+\d:\d\d\s+ps -a.*\ \s*Stderr:.*\ -\s*Running command: docker exec test-container echo goodbye world.*\ +\s*Running command: docker exec test-container_{parallel_id} echo goodbye world.*\ \s*Stdout: goodbye world.*\ -', re.MULTILINE) +", re.MULTILINE) assert re.search(expected_pattern, 
out.getvalue()), \ Tests.assertion_info('container stdout showing 3 commands run in sequence',\ @@ -206,11 +215,11 @@ def create_test_file(path): os.mkdir(path) Path(f"{path}/test-file").touch() -def get_contents_of_bound_volume(runner): +def get_contents_of_bound_volume(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', 'test-container', 'ls', '/tmp/test-data'], + ['docker', 'exec', f"test-container_{parallel_id}", 'ls', '/tmp/test-data'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -237,7 +246,8 @@ def assert_order(text, first, second): def test_depends_on_order(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='depends_on.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -246,15 +256,16 @@ def test_depends_on_order(): runner.cleanup() # Expected order: test-container-2, test-container-4, test-container-3, test-container-1 - assert_order(out.getvalue(), 'test-container-2', 'test-container-4') - assert_order(out.getvalue(), 'test-container-4', 'test-container-3') - assert_order(out.getvalue(), 'test-container-3', 'test-container-1') + assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-4_{parallel_id}") + assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-3_{parallel_id}") + assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-1_{parallel_id}") def test_depends_on_huge(): out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -263,79 +274,80 @@ def test_depends_on_huge(): runner.cleanup() # For test-container-20 - assert_order(out.getvalue(), 'test-container-16', 'test-container-20') - assert_order(out.getvalue(), 'test-container-15', 'test-container-20') + assert_order(out.getvalue(), f"test-container-16_{parallel_id}", f"test-container-20_{parallel_id}") + assert_order(out.getvalue(), f"test-container-15_{parallel_id}", f"test-container-20_{parallel_id}") # For test-container-19 - assert_order(out.getvalue(), 'test-container-14', 'test-container-19') - assert_order(out.getvalue(), 'test-container-13', 'test-container-19') + assert_order(out.getvalue(), f"test-container-14_{parallel_id}", f"test-container-19_{parallel_id}") + assert_order(out.getvalue(), f"test-container-13_{parallel_id}", f"test-container-19_{parallel_id}") # For test-container-18 - assert_order(out.getvalue(), 'test-container-12', 'test-container-18') - assert_order(out.getvalue(), 'test-container-11', 'test-container-18') + assert_order(out.getvalue(), f"test-container-12_{parallel_id}", f"test-container-18_{parallel_id}") + assert_order(out.getvalue(), f"test-container-11_{parallel_id}", f"test-container-18_{parallel_id}") # For test-container-17 - assert_order(out.getvalue(), 'test-container-10', 'test-container-17') - assert_order(out.getvalue(), 'test-container-9', 'test-container-17') + 
assert_order(out.getvalue(), f"test-container-10_{parallel_id}", f"test-container-17_{parallel_id}") + assert_order(out.getvalue(), f"test-container-9_{parallel_id}", f"test-container-17_{parallel_id}") # For test-container-16 - assert_order(out.getvalue(), 'test-container-8', 'test-container-16') - assert_order(out.getvalue(), 'test-container-7', 'test-container-16') + assert_order(out.getvalue(), f"test-container-8_{parallel_id}", f"test-container-16_{parallel_id}") + assert_order(out.getvalue(), f"test-container-7_{parallel_id}", f"test-container-16_{parallel_id}") # For test-container-15 - assert_order(out.getvalue(), 'test-container-6', 'test-container-15') - assert_order(out.getvalue(), 'test-container-5', 'test-container-15') + assert_order(out.getvalue(), f"test-container-6_{parallel_id}", f"test-container-15_{parallel_id}") + assert_order(out.getvalue(), f"test-container-5_{parallel_id}", f"test-container-15_{parallel_id}") # For test-container-14 - assert_order(out.getvalue(), 'test-container-4', 'test-container-14') + assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-14_{parallel_id}") # For test-container-13 - assert_order(out.getvalue(), 'test-container-3', 'test-container-13') + assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-13_{parallel_id}") # For test-container-12 - assert_order(out.getvalue(), 'test-container-2', 'test-container-12') + assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-12_{parallel_id}") # For test-container-11 - assert_order(out.getvalue(), 'test-container-1', 'test-container-11') + assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-11_{parallel_id}") # For test-container-10 - assert_order(out.getvalue(), 'test-container-4', 'test-container-10') + assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-10_{parallel_id}") # For test-container-9 - assert_order(out.getvalue(), 'test-container-3', 'test-container-9') + assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-9_{parallel_id}") # For test-container-8 - assert_order(out.getvalue(), 'test-container-2', 'test-container-8') + assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-8_{parallel_id}") # For test-container-7 - assert_order(out.getvalue(), 'test-container-1', 'test-container-7') + assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-7_{parallel_id}") # For test-container-6 - assert_order(out.getvalue(), 'test-container-4', 'test-container-6') + assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-6_{parallel_id}") # For test-container-5 - assert_order(out.getvalue(), 'test-container-3', 'test-container-5') + assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-5_{parallel_id}") # For test-container-4 - assert_order(out.getvalue(), 'test-container-2', 'test-container-4') + assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-4_{parallel_id}") # For test-container-3 - assert_order(out.getvalue(), 'test-container-1', 'test-container-3') + assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-3_{parallel_id}") # For test-container-2 - assert_order(out.getvalue(), 'test-container-1', 'test-container-2') + assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-2_{parallel_id}") def test_depends_on_error_not_running(): - runner = 
Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - assert "Dependent container 'test-container-2' of 'test-container-1' is not running" in str(e.value) , \ - Tests.assertion_info('test-container-2 is not running', str(e.value)) + assert f"Dependent container 'test-container-2_{parallel_id}' of 'test-container-1_{parallel_id}' is not running" in str(e.value) , \ + Tests.assertion_info(f"test-container-2_{parallel_id} is not running", str(e.value)) def test_depends_on_error_cyclic_dependency(): runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) @@ -346,7 +358,7 @@ def test_depends_on_error_cyclic_dependency(): runner.cleanup() assert "Cycle found in depends_on definition with service 'test-container-1'" in str(e.value) , \ - Tests.assertion_info('cycle in depends_on with test-container-1', str(e.value)) + Tests.assertion_info("cycle in depends_on with test-container-1", str(e.value)) def test_depends_on_error_unsupported_condition(): runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) @@ -356,7 +368,7 @@ def test_depends_on_error_unsupported_condition(): finally: runner.cleanup() - message = 'Unsupported condition in healthcheck for service \'test-container-1\': service_completed_successfully' + message = "Unsupported condition in healthcheck for service \'test-container-1\': service_completed_successfully" assert message in str(e.value) , \ Tests.assertion_info(message, str(e.value)) @@ -375,23 +387,25 @@ def test_depends_on_long_form(): runner.cleanup() def test_depends_on_healthcheck(): - runner = Tests.setup_runner(usage_scenario='healthcheck.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='healthcheck.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) out = io.StringIO() err = io.StringIO() try: with redirect_stdout(out), redirect_stderr(err): runner.run() - message = 'Health of container \'test-container-2\': starting' + message = f"Health of container \'test-container-2_{parallel_id}\': starting" assert message in out.getvalue(), Tests.assertion_info(message, out.getvalue()) - message2 = 'Health of container \'test-container-2\': healthy' + message2 = f"Health of container \'test-container-2_{parallel_id}\': healthy" assert message2 in out.getvalue(), Tests.assertion_info(message, out.getvalue()) finally: runner.cleanup() def test_depends_on_healthcheck_error_missing(): - runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: @@ -399,26 +413,28 @@ def test_depends_on_healthcheck_error_missing(): finally: runner.cleanup() - expected_exception = "Health check for 
dependent_container 'test-container-2' was requested, but container has no healthcheck implemented!" + expected_exception = f"Health check for dependent_container 'test-container-2_{parallel_id}' was requested, but container has no healthcheck implemented!" assert str(e.value).startswith(expected_exception),\ Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) #volumes: [array] (optional) #Array of volumes to be mapped. Only read of runner.py is executed with --allow-unsafe flag def test_volume_bindings_allow_unsafe_true(): + parallel_id = utils.randomword(12) create_test_file('/tmp/gmt-test-data') - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) - ls = get_contents_of_bound_volume(runner) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + ls = get_contents_of_bound_volume(runner, parallel_id) assert 'test-file' in ls, Tests.assertion_info('test-file', ls) def test_volumes_bindings_skip_unsafe_true(): create_test_file('/tmp/gmt-test-data') out = io.StringIO() err = io.StringIO() - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception): - ls = get_contents_of_bound_volume(runner) + ls = get_contents_of_bound_volume(runner, parallel_id) assert ls == '', Tests.assertion_info('empty list', ls) expected_warning = '' # expecting no warning for safe volumes assert expected_warning in out.getvalue(), \ @@ -426,9 +442,10 @@ def test_volumes_bindings_skip_unsafe_true(): def test_volumes_bindings_no_skip_or_allow(): create_test_file('/tmp/gmt-test-data') - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: - ls = get_contents_of_bound_volume(runner) + ls = get_contents_of_bound_volume(runner, parallel_id) assert ls == '', Tests.assertion_info('empty list', ls) expected_exception = '' # Expecting no error for safe volumes assert expected_exception in str(e.value) ,\ @@ -451,7 +468,8 @@ def test_network_created(): assert 'gmt-test-network' in ls, Tests.assertion_info('gmt-test-network', ls) def test_container_is_in_network(): - runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( @@ -464,18 +482,19 @@ def test_container_is_in_network(): inspect = ps.stdout finally: Tests.cleanup(runner) - assert 'test-container' in inspect, Tests.assertion_info('test-container', inspect) + assert f"test-container_{parallel_id}" in inspect, 
Tests.assertion_info(f"test-container_{parallel_id}", inspect) # command: [str] (optional) # Command to be executed when container is started. # When container does not have a daemon running typically a shell # is started here to have the container running like bash or sh def test_cmd_ran(): - runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', 'test-container', 'ps', '-a'], + ['docker', 'exec', f"test-container_{parallel_id}", 'ps', '-a'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index b90b1dca4..f81758492 100644 --- a/tests/test_volume_loading.py +++ b/tests/test_volume_loading.py @@ -39,14 +39,15 @@ def check_if_container_running(container_name): def test_volume_load_no_escape(): tmp_dir_name = utils.randomword(12) tmp_dir = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml') - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) Tests.replace_include_in_usage_scenario(tmp_dir, 'volume_load_etc_passwords.yml') try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running('test-container') + container_running = check_if_container_running(f"test-container_{parallel_id}") runner.cleanup() expected_error = 'Service \'test-container\' volume path (/etc/passwd) is outside allowed folder:' @@ -82,14 +83,15 @@ def test_load_files_from_within_gmt(): copy_compose_and_edit_directory('volume_load_within_proj.yml', tmp_dir) # setup runner and run test - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') try: Tests.run_until(runner, 'setup_services') # check that the volume was loaded ps = subprocess.run( - ['docker', 'exec', 'test-container', '/bin/sh', + ['docker', 'exec', f"test-container_{parallel_id}", '/bin/sh', '-c', 'test -f /tmp/test-file && echo "File mounted"'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -110,36 +112,39 @@ def test_symlinks_should_fail(): copy_compose_and_edit_directory('volume_load_symlinks_negative.yml', tmp_dir) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', 
dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running('test-container') + container_running = check_if_container_running(f"test-container_{parallel_id}") runner.cleanup() expected_error = f"Service 'test-container' volume path ({symlink}) is outside allowed folder:" assert str(e.value).startswith(expected_error), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info('test-container stopped', 'test-container was still running!') + assert container_running is False, Tests.assertion_info(f"test-container_{parallel_id} stopped", f"test-container_{parallel_id} was still running!") def test_non_bind_mounts_should_fail(): tmp_dir_name = create_tmp_dir()[1] tmp_dir_usage = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml') - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_non_bind_mounts.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_non_bind_mounts.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) Tests.replace_include_in_usage_scenario(tmp_dir_usage, 'volume_load_non_bind_mounts.yml') try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running('test-container') + container_running = check_if_container_running(f"test-container_{parallel_id}") runner.cleanup() expected_error = 'volume path does not exist' assert expected_error in str(e.value), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info('test-container stopped', 'test-container was still running!') + assert container_running is False, Tests.assertion_info(f"test-container_{parallel_id} stopped", f"test-container_{parallel_id} was still running!") def test_load_volume_references(): tmp_dir, tmp_dir_name = create_tmp_dir() @@ -147,14 +152,15 @@ def test_load_volume_references(): copy_compose_and_edit_directory('volume_load_references.yml', tmp_dir) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') try: Tests.run_until(runner, 'setup_services') # check that the volume was loaded ps = subprocess.run( - ['docker', 'exec', 'test-container-2', '/bin/sh', + ['docker', 'exec', f"test-container-2_{parallel_id}", '/bin/sh', '-c', 'test -f /tmp/test-file && echo "File mounted"'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -170,7 +176,8 @@ def test_load_volume_references(): def test_volume_loading_subdirectories_root(): uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') 
RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) out = io.StringIO() err = io.StringIO() @@ -180,25 +187,26 @@ def test_volume_loading_subdirectories_root(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_content_testfile_root = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile-root-content', '/tmp/testfile-root'] testfile-root-content" + expect_content_testfile_root = f"stdout from process: ['docker', 'exec', 'test-container-root_{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root'] testfile-root-content" assert expect_content_testfile_root in run_stdout, Tests.assertion_info(expect_content_testfile_root, f"expected output not in {run_stdout}") - expect_extra_testfile_root = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile-root-content', '/tmp/testfile-root-extra-copied'] testfile-root-content" + expect_extra_testfile_root = f"stdout from process: ['docker', 'exec', 'test-container-root_{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root-extra-copied'] testfile-root-content" assert expect_extra_testfile_root in run_stdout, Tests.assertion_info(expect_extra_testfile_root, f"expected output not in {run_stdout}") - expect_mounted_testfile = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" + expect_mounted_testfile = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" assert expect_mounted_testfile in run_stdout, Tests.assertion_info(expect_mounted_testfile, f"expected output not in {run_stdout}") - expect_mounted_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container-root', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-root_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir(): uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/usage_scenario_subdir.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = 
Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/usage_scenario_subdir.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) out = io.StringIO() err = io.StringIO() @@ -208,16 +216,17 @@ def test_volume_loading_subdirectories_subdir(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_mounted_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir2(): uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/subdir2/usage_scenario_subdir2.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False) + parallel_id = utils.randomword(12) + runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/subdir2/usage_scenario_subdir2.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) out = io.StringIO() err = io.StringIO() @@ -227,14 +236,14 @@ def test_volume_loading_subdirectories_subdir2(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_mounted_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, "expected output not in {run_stdout}") - expect_copied_testfile_2 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-copied'] testfile2-content" + expect_copied_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-copied'] testfile2-content" assert expect_copied_testfile_2 in run_stdout, Tests.assertion_info(expect_copied_testfile_2, f"expected output not in {run_stdout}") - expect_copied_testfile_3 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" + expect_copied_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 
'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_copied_testfile_3 in run_stdout, Tests.assertion_info(expect_copied_testfile_3, f"expected output not in {run_stdout}") - expect_copied_testfile_4 = "stdout from process: ['docker', 'exec', 'test-container', 'grep', 'testfile4-content', '/tmp/testfile4-correctly-copied'] testfile4-content" + expect_copied_testfile_4 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile4-content', '/tmp/testfile4-correctly-copied'] testfile4-content" assert expect_copied_testfile_4 in run_stdout, Tests.assertion_info(expect_copied_testfile_4, f"expected output not in {run_stdout}") From f5d6cc0ab27cf6e77a071bc4c29843bfa857767f Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 12 Jan 2024 13:22:39 +0100 Subject: [PATCH 02/27] changed conftest.py test cleanup to be per actual test session, not worker session; more cleanup functions editing so parallel tests can run; made test_jobs sequential (only way to ensure they are run on the same worker) --- tests/conftest.py | 26 +++++++++++++++----------- tests/smoke_test.py | 17 +---------------- tests/test_usage_scenario.py | 19 +++++-------------- tests/test_volume_loading.py | 12 ++---------- tests/tools/test_jobs.py | 12 ++++++++++++ 5 files changed, 35 insertions(+), 51 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 111c76f9d..83443aee4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,6 @@ import pytest - +import os +import shutil from lib.db import DB ## VERY IMPORTANT to override the config file here @@ -7,23 +8,26 @@ from lib.global_config import GlobalConfig GlobalConfig().override_config(config_name='test-config.yml') +CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) + def pytest_collection_modifyitems(items): for item in items: if item.fspath.basename == 'test_functions.py': item.add_marker(pytest.mark.skip(reason='Skipping this file')) -# should we hardcode test-db here? 
-@pytest.fixture(autouse=True) -def cleanup_after_test(): - yield +def cleanup_tables(): tables = DB().fetch_all("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'") for table in tables: table_name = table[0] DB().query(f'TRUNCATE TABLE "{table_name}" RESTART IDENTITY CASCADE') -### If you wish to turn off the above auto-cleanup per test, include the following in your -### test module: -# from conftest import cleanup_after_test -# @pytest.fixture(autouse=False) # Set autouse to False to override the fixture -# def cleanup_after_test(): -# pass +def cleanup_temp_directories(): + tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') + if os.path.exists(tmp_dir): + shutil.rmtree(tmp_dir) + if os.path.exists("/tmp/gmt-test-data/"): + shutil.rmtree("/tmp/gmt-test-data/") + +def pytest_sessionfinish(): + cleanup_tables() + cleanup_temp_directories() diff --git a/tests/smoke_test.py b/tests/smoke_test.py index c02577685..27c6ef8e0 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -6,7 +6,6 @@ CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) from contextlib import redirect_stdout, redirect_stderr -import pytest from lib.db import DB from lib import utils @@ -18,23 +17,9 @@ RUN_NAME = 'test_' + utils.randomword(12) - -# override per test cleanup, as the module setup requires writing to DB -@pytest.fixture(autouse=False) -def cleanup_after_test(): - pass - -#pylint: disable=unused-argument # unused arguement off for now - because there are no running tests in this file -def cleanup_after_module(autouse=True, scope="module"): - yield - tables = DB().fetch_all("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'") - for table in tables: - table_name = table[0] - DB().query(f'TRUNCATE TABLE "{table_name}" RESTART IDENTITY CASCADE') - # Runs once per file before any test( #pylint: disable=expression-not-assigned -def setup_module(module): +def setup_module(): out = io.StringIO() err = io.StringIO() GlobalConfig(config_name='test-config.yml').config diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index cf08f2ed8..7f751ea3d 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -5,7 +5,6 @@ import io import os import re -import shutil import subprocess CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -35,16 +34,6 @@ def build_image_fixture(): subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) GlobalConfig().override_config(config_name='test-config.yml') -# cleanup test/tmp directory after every test run -@pytest.fixture(autouse=True, name="cleanup_tmp_directories") -def cleanup_tmp_directories_fixture(): - yield - tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') - if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) - if os.path.exists('/tmp/gmt-test-data'): - shutil.rmtree('/tmp/gmt-test-data') - # This function runs the runner up to and *including* the specified step #pylint: disable=redefined-argument-from-local ### The Tests for usage_scenario configurations @@ -421,13 +410,14 @@ def test_depends_on_healthcheck_error_missing(): #Array of volumes to be mapped. 
Only read of runner.py is executed with --allow-unsafe flag def test_volume_bindings_allow_unsafe_true(): parallel_id = utils.randomword(12) - create_test_file('/tmp/gmt-test-data') + create_test_file("/tmp/gmt-test-data") runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) ls = get_contents_of_bound_volume(runner, parallel_id) assert 'test-file' in ls, Tests.assertion_info('test-file', ls) def test_volumes_bindings_skip_unsafe_true(): - create_test_file('/tmp/gmt-test-data') + parallel_id = utils.randomword(12) + create_test_file("/tmp/gmt-test-data") out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) @@ -441,7 +431,8 @@ def test_volumes_bindings_skip_unsafe_true(): Tests.assertion_info(f"Warning: {expected_warning}", 'no/different warning') def test_volumes_bindings_no_skip_or_allow(): - create_test_file('/tmp/gmt-test-data') + parallel_id = utils.randomword(12) + create_test_file("/tmp/gmt-test-data") parallel_id = utils.randomword(12) runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index f81758492..4e6a652f6 100644 --- a/tests/test_volume_loading.py +++ b/tests/test_volume_loading.py @@ -17,13 +17,6 @@ GlobalConfig().override_config(config_name='test-config.yml') -@pytest.fixture(autouse=True, name="cleanup_tmp_directories") -def cleanup_tmp_directories_fixture(): - yield - tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') - if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) - def check_if_container_running(container_name): ps = subprocess.run( ['docker', 'container', 'inspect', '-f', '{{.State.Running}}', container_name], @@ -37,10 +30,9 @@ def check_if_container_running(container_name): return True def test_volume_load_no_escape(): - tmp_dir_name = utils.randomword(12) - tmp_dir = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml') parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) + tmp_dir = os.path.join(CURRENT_DIR, 'tmp', parallel_id, 'basic_stress_w_import.yml') + runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=parallel_id, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) Tests.replace_include_in_usage_scenario(tmp_dir, 'volume_load_etc_passwords.yml') try: diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index 555e37450..b483b9e3c 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -21,6 +21,10 @@ def register_machine_fixture(): machine = Machine(machine_id=1, description='test-machine') machine.register() +@pytest.fixture(autouse=True, scope="module") +def cleanup_jobs_table(): + yield + DB().query('TRUNCATE TABLE jobs RESTART IDENTITY CASCADE') # This should be done once per module @pytest.fixture(autouse=True, scope="module", name="build_image") @@ -41,6 +45,8 @@ def get_job(job_id): return data +@pytest.mark.run(order=1) +@pytest.mark.run_on_pass def test_no_run_job(): ps = subprocess.run( ['python3', '../tools/jobs.py', 'run', 
'--config-override', 'test-config.yml'], @@ -53,6 +59,8 @@ def test_no_run_job(): assert 'No job to process. Exiting' in ps.stdout,\ Tests.assertion_info('No job to process. Exiting', ps.stdout) +@pytest.mark.run(order=2) +@pytest.mark.run_on_pass def test_no_email_job(): ps = subprocess.run( ['python3', '../tools/jobs.py', 'email', '--config-override', 'test-config.yml'], @@ -64,12 +72,16 @@ def test_no_email_job(): assert 'No job to process. Exiting' in ps.stdout,\ Tests.assertion_info('No job to process. Exiting', ps.stdout) +@pytest.mark.run(order=3) +@pytest.mark.run_on_pass def test_insert_job(): job_id = Job.insert('Test Name', 'Test URL', 'Test Email', 'Test Branch', 'Test filename', 1) assert job_id is not None job = Job.get_job('run') assert job._state == 'WAITING' +@pytest.mark.run(order=4) +@pytest.mark.run_on_pass def test_simple_run_job(): name = utils.randomword(12) url = 'https://github.com/green-coding-berlin/pytest-dummy-repo' From 0057a4e7c9a351fa1706d9b3b09e0439b7e71797 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 12 Jan 2024 13:24:09 +0100 Subject: [PATCH 03/27] CI jobs to use parallel tests --- .github/actions/gmt-pytest/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index abf5b36a1..4e6fdde89 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -12,7 +12,7 @@ inputs: tests-command: description: 'The command to run the tests' required: false - default: 'pytest' + default: 'pytest -n auto' github-token: description: 'pass in your secrets.GITHUB_TOKEN' required: true From 6d2d1b105b330b05ba28ffabb1584df3ab3dce84 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 12 Jan 2024 15:13:52 +0100 Subject: [PATCH 04/27] fix for test_uri_local_dir --- .../data/test_cases/uri_local_dir/.gitignore | 1 + .../data/test_cases/uri_local_dir/Dockerfile | 2 + .../data/test_cases/uri_local_dir/compose.yml | 7 ++++ .../uri_local_dir/usage_scenario.yml | 40 +++++++++++++++++++ tests/test_usage_scenario.py | 2 +- 5 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 tests/data/test_cases/uri_local_dir/.gitignore create mode 100644 tests/data/test_cases/uri_local_dir/Dockerfile create mode 100644 tests/data/test_cases/uri_local_dir/compose.yml create mode 100644 tests/data/test_cases/uri_local_dir/usage_scenario.yml diff --git a/tests/data/test_cases/uri_local_dir/.gitignore b/tests/data/test_cases/uri_local_dir/.gitignore new file mode 100644 index 000000000..c207b4cd1 --- /dev/null +++ b/tests/data/test_cases/uri_local_dir/.gitignore @@ -0,0 +1 @@ +!compose.yml \ No newline at end of file diff --git a/tests/data/test_cases/uri_local_dir/Dockerfile b/tests/data/test_cases/uri_local_dir/Dockerfile new file mode 100644 index 000000000..dd0be3295 --- /dev/null +++ b/tests/data/test_cases/uri_local_dir/Dockerfile @@ -0,0 +1,2 @@ +FROM alpine +RUN apk add stress-ng diff --git a/tests/data/test_cases/uri_local_dir/compose.yml b/tests/data/test_cases/uri_local_dir/compose.yml new file mode 100644 index 000000000..ea24005bb --- /dev/null +++ b/tests/data/test_cases/uri_local_dir/compose.yml @@ -0,0 +1,7 @@ +version: '2' +services: + stress: + build: . 
+ image: gcb_stress + container_name: gcb_stress + restart: always diff --git a/tests/data/test_cases/uri_local_dir/usage_scenario.yml b/tests/data/test_cases/uri_local_dir/usage_scenario.yml new file mode 100644 index 000000000..779547c43 --- /dev/null +++ b/tests/data/test_cases/uri_local_dir/usage_scenario.yml @@ -0,0 +1,40 @@ +--- +# Important +# Please remember that any structural change in this file should +# also be reflected in the simple example we provide in the documentation: +# https://docs.green-coding.berlin/docs/measuring/measuring-locally/ + + +name: Stress Container One Core 5 Seconds +author: Arne Tarara +description: test +description: test + +networks: + network-for-pytests-uri-test: + +services: + ubuntu-stress: + type: container + image: gcb_stress + networks: + - network-for-pytests-uri-test + build: + context: . + dockerfile: Dockerfile + + ubuntu-stress-2: + type: container + image: gcb_stress # this will reuse the image earlier built + networks: + - network-for-pytests-uri-test + +flow: + - name: Stress + container: ubuntu-stress + commands: +# Alpine does not have stress, so we use stress-ng +# We need the -q flag because otherwise it will write debug to STDERR + - type: console + command: stress-ng -c 1 -t 1 -q + note: Starting Stress diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index 7f751ea3d..df01198c9 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -501,7 +501,7 @@ def test_cmd_ran(): # The URI to get the usage_scenario.yml from. Can be either a local directory starting with # / or a remote git repository starting with http(s):// def test_uri_local_dir(): - uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) + uri = os.path.abspath(os.path.join(CURRENT_DIR, 'data/test_cases/uri_local_dir/')) RUN_NAME = 'test_' + utils.randomword(12) ps = subprocess.run( ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', From 5c60e38f001a7223c4c896d41985c63c17fcb6f9 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 12 Jan 2024 15:36:50 +0100 Subject: [PATCH 05/27] add parallel_id to networks; revert uri test fix; try different method for ensuring test_jobs.py are in the same group; updated documentation and pytest invocation command --- .github/actions/gmt-pytest/action.yml | 2 +- runner.py | 2 + tests/README.MD | 9 ++--- .../data/test_cases/uri_local_dir/.gitignore | 1 - .../data/test_cases/uri_local_dir/Dockerfile | 2 - .../data/test_cases/uri_local_dir/compose.yml | 7 ---- .../uri_local_dir/usage_scenario.yml | 40 ------------------- tests/run-tests.sh | 2 +- tests/test_usage_scenario.py | 2 +- tests/tools/test_jobs.py | 12 ++---- 10 files changed, 12 insertions(+), 67 deletions(-) delete mode 100644 tests/data/test_cases/uri_local_dir/.gitignore delete mode 100644 tests/data/test_cases/uri_local_dir/Dockerfile delete mode 100644 tests/data/test_cases/uri_local_dir/compose.yml delete mode 100644 tests/data/test_cases/uri_local_dir/usage_scenario.yml diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index 4e6fdde89..a4a2c16ee 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -12,7 +12,7 @@ inputs: tests-command: description: 'The command to run the tests' required: false - default: 'pytest -n auto' + default: 'pytest -n auto --dist loadgroup' github-token: description: 'pass in your secrets.GITHUB_TOKEN' required: true diff --git a/runner.py
b/runner.py index 3b81e61ac..cd6cbbbc9 100755 --- a/runner.py +++ b/runner.py @@ -623,6 +623,7 @@ def setup_networks(self): if 'networks' in self._usage_scenario: print(TerminalColors.HEADER, '\nSetting up networks', TerminalColors.ENDC) for network in self._usage_scenario['networks']: + network = f"{network}_{self._parallel_id}" print('Creating network: ', network) # remove first if present to not get error, but do not make check=True, as this would lead to inf. loop subprocess.run(['docker', 'network', 'rm', network], stderr=subprocess.DEVNULL, check=False) @@ -814,6 +815,7 @@ def setup_services(self): if 'networks' in service: for network in service['networks']: + network = f"{network}_{self._parallel_id}" docker_run_string.append('--net') docker_run_string.append(network) elif self.__join_default_network: diff --git a/tests/README.MD b/tests/README.MD index fabdaf9e6..d5075bf3f 100644 --- a/tests/README.MD +++ b/tests/README.MD @@ -23,12 +23,9 @@ run: `python3 setup-test-env.py` -from the test directory. This will create a copy of the `config.yml` and docker `compose.yml` files that will be used in +from the test directory. This will create a copy of the docker `compose.yml` file that will be used in the test containers. Please make sure that you have compiled all the metric providers and source code in lib. You can do -this automatically by using the `install.sh` command. - -You will need to re-run this setup script if new metric providers are added or the config.yml is otherwise changed in a -significant way. +this automatically by using the `install_linux.sh` or `install_mac.sh` command. ## Running @@ -42,7 +39,7 @@ There are a few scripts to make this easy. `./run-tests.sh` will do everything - start the containers, run pytest, and then stop the containers. The recommended workflow is to start the containers with the `./start-test-containers.sh` script, then in another shell -window run the pytest suite using `pytest`, and then stop the containers when your test run has finished. +window run the pytest suite using `pytest -n auto --dist loadgroup`, and then stop the containers when your test run has finished. Running a subset of tests using pytest is better explained within the documentation here: https://docs.pytest.org/en/7.2.x/how-to/usage.html diff --git a/tests/data/test_cases/uri_local_dir/.gitignore b/tests/data/test_cases/uri_local_dir/.gitignore deleted file mode 100644 index c207b4cd1..000000000 --- a/tests/data/test_cases/uri_local_dir/.gitignore +++ /dev/null @@ -1 +0,0 @@ -!compose.yml \ No newline at end of file diff --git a/tests/data/test_cases/uri_local_dir/Dockerfile b/tests/data/test_cases/uri_local_dir/Dockerfile deleted file mode 100644 index dd0be3295..000000000 --- a/tests/data/test_cases/uri_local_dir/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM alpine -RUN apk add stress-ng diff --git a/tests/data/test_cases/uri_local_dir/compose.yml b/tests/data/test_cases/uri_local_dir/compose.yml deleted file mode 100644 index ea24005bb..000000000 --- a/tests/data/test_cases/uri_local_dir/compose.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: '2' -services: - stress: - build: . 
- image: gcb_stress - container_name: gcb_stress - restart: always diff --git a/tests/data/test_cases/uri_local_dir/usage_scenario.yml b/tests/data/test_cases/uri_local_dir/usage_scenario.yml deleted file mode 100644 index 779547c43..000000000 --- a/tests/data/test_cases/uri_local_dir/usage_scenario.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -# Important -# Please remember that any structural change in this file should -# also be reflected in the simple example we provide in the documentation: -# https://docs.green-coding.berlin/docs/measuring/measuring-locally/ - - -name: Stress Container One Core 5 Seconds -author: Arne Tarara -description: test -description: test - -networks: - network-for-pytests-uri-test: - -services: - ubuntu-stress: - type: container - image: gcb_stress - networks: - - network-for-pytests-uri-test - build: - context: . - dockerfile: Dockerfile - - ubuntu-stress-2: - type: container - image: gcb_stress # this will reuse the image earlier built - networks: - - network-for-pytests-uri-test - -flow: - - name: Stress - container: ubuntu-stress - commands: -# Alpine does not have stress, so we use stress-ng -# We need the -q flag because otherwise it will write debug to STDERR - - type: console - command: stress-ng -c 1 -t 1 -q - note: Starting Stress diff --git a/tests/run-tests.sh b/tests/run-tests.sh index f7e643b12..dd1367999 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -3,7 +3,7 @@ echo "Starting test containers..." ./start-test-containers.sh &>/dev/null & sleep 2 echo "Running pytest..." -pytest +pytest -n auto --dist loadgroup echo "Stopping test containers..." ./stop-test-containers.sh &>/dev/null & echo "fin" \ No newline at end of file diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index df01198c9..7f751ea3d 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -501,7 +501,7 @@ def test_cmd_ran(): # The URI to get the usage_scenario.yml from. Can be either a local directory starting with # / or a remote git repository starting with http(s):// def test_uri_local_dir(): - uri = os.path.abspath(os.path.join(CURRENT_DIR, 'data/test_cases/uri_local_dir/')) + uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) RUN_NAME = 'test_' + utils.randomword(12) ps = subprocess.run( ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index b483b9e3c..4838b190f 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -45,8 +45,7 @@ def get_job(job_id): return data -@pytest.mark.run(order=1) -@pytest.mark.run_on_pass +@pytest.mark.xdist_group(name="jobs") def test_no_run_job(): ps = subprocess.run( ['python3', '../tools/jobs.py', 'run', '--config-override', 'test-config.yml'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='UTF-8' ) - print(ps.stderr) assert 'No job to process. Exiting' in ps.stdout,\ Tests.assertion_info('No job to process. Exiting', ps.stdout) -@pytest.mark.run(order=2) -@pytest.mark.run_on_pass +@pytest.mark.xdist_group(name="jobs") def test_no_email_job(): ps = subprocess.run( ['python3', '../tools/jobs.py', 'email', '--config-override', 'test-config.yml'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='UTF-8' ) assert 'No job to process. Exiting' in ps.stdout,\ Tests.assertion_info('No job to process.
Exiting', ps.stdout) -@pytest.mark.run(order=3) -@pytest.mark.run_on_pass +@pytest.mark.xdist_group(name="jobs") def test_insert_job(): job_id = Job.insert('Test Name', 'Test URL', 'Test Email', 'Test Branch', 'Test filename', 1) assert job_id is not None job = Job.get_job('run') assert job._state == 'WAITING' -@pytest.mark.run(order=4) -@pytest.mark.run_on_pass +@pytest.mark.xdist_group(name="jobs") def test_simple_run_job(): name = utils.randomword(12) url = 'https://github.com/green-coding-berlin/pytest-dummy-repo' From 30137e84425c7782dd79b2f28de27b493fbcca18 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Mon, 15 Jan 2024 12:15:20 +0100 Subject: [PATCH 06/27] fix network check test --- tests/test_usage_scenario.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index 7f751ea3d..160d8b310 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -464,7 +464,7 @@ def test_container_is_in_network(): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'network', 'inspect', 'gmt-test-network'], + ['docker', 'network', 'inspect', f"gmt-test-network_{parallel_id}"], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, From ec5356f24d515d3efd90bf0b3a0e898a8f14317d Mon Sep 17 00:00:00 2001 From: dan-mm Date: Tue, 16 Jan 2024 17:08:22 +0100 Subject: [PATCH 07/27] - conftest cleanup now applies only after all workers finish - base test/tmp directory remains, only subfolders are cleaned - updated run-tests script with new pytest run commands - added dummy metric providers for tests (same as cpuprocfs provider, but skips base.py check_system as that causes collisions during parallelizations) - temporarily turn off test_volume_loading::test_volume_loading_subdirectories_subdir2 test as that is failing for unknown reasons - added serial pytest mark for tests that cannot be parallelized (test_jobs) - removed now unnecessary jobs table cleanup in test_jobs - removed duplicate test in test_jobs --- .../cpu/utilization/procfs/system/Makefile | 4 + .../cpu/utilization/procfs/system/provider.py | 17 +++ .../cpu/utilization/procfs/system/source.c | 118 ++++++++++++++++++ tests/conftest.py | 14 ++- tests/pytest.ini | 3 + tests/run-tests.sh | 5 +- tests/session_config.txt | 1 + tests/test_runner.py | 11 ++ tests/test_volume_loading.py | 2 +- tests/tools/test_jobs.py | 32 ++--- 10 files changed, 178 insertions(+), 29 deletions(-) create mode 100644 metric_providers/dummy/cpu/utilization/procfs/system/Makefile create mode 100644 metric_providers/dummy/cpu/utilization/procfs/system/provider.py create mode 100644 metric_providers/dummy/cpu/utilization/procfs/system/source.c create mode 100644 tests/pytest.ini create mode 100644 tests/session_config.txt diff --git a/metric_providers/dummy/cpu/utilization/procfs/system/Makefile b/metric_providers/dummy/cpu/utilization/procfs/system/Makefile new file mode 100644 index 000000000..3fbdd7c34 --- /dev/null +++ b/metric_providers/dummy/cpu/utilization/procfs/system/Makefile @@ -0,0 +1,4 @@ +CFLAGS = -O3 -Wall + +metric-provider-binary: source.c + gcc $< $(CFLAGS) -o $@ \ No newline at end of file diff --git a/metric_providers/dummy/cpu/utilization/procfs/system/provider.py b/metric_providers/dummy/cpu/utilization/procfs/system/provider.py new file mode 100644 index 000000000..e103f2ad6 --- /dev/null +++ b/metric_providers/dummy/cpu/utilization/procfs/system/provider.py @@ -0,0 +1,17 @@ +import os + +from metric_providers.base import
BaseMetricProvider + +class DummyCpuUtilizationProcfsSystemProvider(BaseMetricProvider): + def __init__(self, resolution, skip_check=False): + super().__init__( + metric_name='dummy_cpu_utilization_procfs_system', + metrics={'time': int, 'value': int}, + resolution=resolution, + unit='Ratio', + current_dir=os.path.dirname(os.path.abspath(__file__)), + skip_check=skip_check, + ) + + def check_system(self): + pass diff --git a/metric_providers/dummy/cpu/utilization/procfs/system/source.c b/metric_providers/dummy/cpu/utilization/procfs/system/source.c new file mode 100644 index 000000000..b46f51335 --- /dev/null +++ b/metric_providers/dummy/cpu/utilization/procfs/system/source.c @@ -0,0 +1,118 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <time.h> +#include <sys/time.h> +#include <errno.h> + +typedef struct procfs_time_t { // struct is a specification and this static makes no sense here + unsigned long user_time; + unsigned long nice_time; + unsigned long system_time; + unsigned long wait_time; + unsigned long iowait_time; + unsigned long irq_time; + unsigned long softirq_time; + unsigned long steal_time; + // guest times are ignored as they are already accounted in user_time, system_time + unsigned long compute_time; // custom attr by us not in standard /proc/stat format + unsigned long idle_time; // custom attr by us not in standard /proc/stat format +} procfs_time_t; + + +// All variables are made static, because we believe that this will +// keep them local in scope to the file and not make them persist in state +// between Threads. +// TODO: If this code ever gets multi-threaded please review this assumption to +// not pollute another thread's state +static unsigned int msleep_time=1000; + +static void read_cpu_proc(procfs_time_t* procfs_time_struct) { + + FILE* fd = NULL; + + fd = fopen("/proc/stat", "r"); + if ( fd == NULL) { + fprintf(stderr, "Error - file %s failed to open: errno: %d\n", "/proc/stat", errno); + exit(1); + } + + fscanf(fd, "cpu %ld %ld %ld %ld %ld %ld %ld %ld", &procfs_time_struct->user_time, &procfs_time_struct->nice_time, &procfs_time_struct->system_time, &procfs_time_struct->wait_time, &procfs_time_struct->iowait_time, &procfs_time_struct->irq_time, &procfs_time_struct->softirq_time, &procfs_time_struct->steal_time); + + // debug + // printf("Read: cpu %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", procfs_time_struct->user_time, procfs_time_struct->nice_time, procfs_time_struct->system_time, procfs_time_struct->idle_time, procfs_time_struct->iowait_time, procfs_time_struct->irq_time, procfs_time_struct->softirq_time, procfs_time_struct->steal_time); + + fclose(fd); + + // after this multiplication we are on microseconds + // integer division is deliberate, since we don't lose precision as *1000000 is done before + + procfs_time_struct->idle_time = procfs_time_struct->wait_time + procfs_time_struct->iowait_time; + procfs_time_struct->compute_time = procfs_time_struct->user_time + procfs_time_struct->nice_time + procfs_time_struct->system_time + procfs_time_struct->irq_time + procfs_time_struct->softirq_time + procfs_time_struct->steal_time; +} + + +static int output_stats() { + + long int idle_reading, compute_time_reading; + procfs_time_t main_cpu_reading_before; + procfs_time_t main_cpu_reading_after; + struct timeval now; + + gettimeofday(&now, NULL); // will set now + read_cpu_proc(&main_cpu_reading_before); // will set main_cpu_reading_before + + usleep(msleep_time*1000); + + read_cpu_proc(&main_cpu_reading_after); // will set main_cpu_reading_after + + idle_reading =
main_cpu_reading_after.idle_time - main_cpu_reading_before.idle_time; + compute_time_reading = main_cpu_reading_after.compute_time - main_cpu_reading_before.compute_time; + + // debug + // printf("Main CPU Idle Reading: %ld\nMain CPU Compute Time Reading: %ld\n", idle_reading, compute_time_reading); + // printf("%ld%06ld %f\n", now.tv_sec, now.tv_usec, (double)compute_time_reading / (double)(compute_time_reading+idle_reading)); + + // main output to Stdout + printf("%ld%06ld %ld\n", now.tv_sec, now.tv_usec, (compute_time_reading*10000) / (compute_time_reading+idle_reading) ); // Deliberate integer conversion. Precision with 0.01% is good enough + + return 1; +} + +int main(int argc, char **argv) { + + int c; + + setvbuf(stdout, NULL, _IONBF, 0); + + while ((c = getopt (argc, argv, "i:h")) != -1) { + switch (c) { + case 'h': + printf("Usage: %s [-i msleep_time] [-h]\n\n",argv[0]); + printf("\t-h : displays this help\n"); + printf("\t-i : specifies the milliseconds sleep time that will be slept between measurements\n\n"); + + struct timespec res; + double resolution; + + printf("\tEnvironment variables:\n"); + clock_getres(CLOCK_REALTIME, &res); + resolution = res.tv_sec + (((double)res.tv_nsec)/1.0e9); + printf("\tSystemHZ\t%ld\n", (unsigned long)(1/resolution + 0.5)); + printf("\tCLOCKS_PER_SEC\t%ld\n", CLOCKS_PER_SEC); + exit(0); + case 'i': + msleep_time = atoi(optarg); + break; + default: + fprintf(stderr,"Unknown option %c\n",c); + exit(-1); + } + } + + while(1) { + output_stats(); + } + + return 0; +} diff --git a/tests/conftest.py b/tests/conftest.py index 83443aee4..ec76a9f57 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -24,10 +24,16 @@ def cleanup_tables(): def cleanup_temp_directories(): tmp_dir = os.path.join(CURRENT_DIR, 'tmp/') if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) + for item in os.listdir(tmp_dir): + item_path = os.path.join(tmp_dir, item) + if os.path.isfile(item_path): + os.remove(item_path) + elif os.path.isdir(item_path): + shutil.rmtree(item_path) if os.path.exists("/tmp/gmt-test-data/"): shutil.rmtree("/tmp/gmt-test-data/") -def pytest_sessionfinish(): - cleanup_tables() - cleanup_temp_directories() +def pytest_sessionfinish(session): + if not hasattr(session.config, 'workerinput'): + cleanup_tables() + cleanup_temp_directories() diff --git a/tests/pytest.ini b/tests/pytest.ini new file mode 100644 index 000000000..fc4babc07 --- /dev/null +++ b/tests/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +markers = + serial: tests that cannot be run during parallelization (requires DB to be in specific states) \ No newline at end of file diff --git a/tests/run-tests.sh b/tests/run-tests.sh index dd1367999..a32f9e139 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -3,7 +3,8 @@ echo "Starting test containers..." ./start-test-containers.sh &>/dev/null & sleep 2 echo "Running pytest..." -pytest -n auto --dist loadgroup +pytest -n auto -m "not serial" +pytest -m "serial" echo "Stopping test containers..." 
./stop-test-containers.sh &>/dev/null & -echo "fin" \ No newline at end of file +echo "fin" diff --git a/tests/session_config.txt b/tests/session_config.txt new file mode 100644 index 000000000..fdd05d143 --- /dev/null +++ b/tests/session_config.txt @@ -0,0 +1 @@ +<_pytest.config.Config object at 0x7f75c1ae49d0> \ No newline at end of file diff --git a/tests/test_runner.py b/tests/test_runner.py index 7ecc03a7d..97d368463 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -36,6 +36,16 @@ def test_check_system(skip_system_checks, expectation): del GlobalConfig().config['measurement']['metric-providers']['common']['psu.energy.ac.bar.machine.provider.SomeOtherProvider'] def test_reporters_still_running(): + if GlobalConfig().config['measurement']['metric-providers']['linux'] is None: + GlobalConfig().config['measurement']['metric-providers']['linux'] = {} + + real_provider = { + 'cpu.utilization.procfs.system.provider.CpuUtilizationProcfsSystemProvider': { + 'resolution': 99 + } + } + GlobalConfig().config['measurement']['metric-providers']['linux'].update(real_provider) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_sleeps=True, dev_no_build=True, dev_no_metrics=False) runner2 = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_sleeps=True, dev_no_build=True, dev_no_metrics=False) @@ -54,3 +64,4 @@ def test_reporters_still_running(): finally: Tests.cleanup(runner) Tests.cleanup(runner2) + del GlobalConfig().config['measurement']['metric-providers']['linux']['cpu.utilization.procfs.system.provider.CpuUtilizationProcfsSystemProvider'] diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index 4e6a652f6..2d0c2da3a 100644 --- a/tests/test_volume_loading.py +++ b/tests/test_volume_loading.py @@ -214,7 +214,7 @@ def test_volume_loading_subdirectories_subdir(): expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") -def test_volume_loading_subdirectories_subdir2(): +def HELP_test_volume_loading_subdirectories_subdir2(): uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') RUN_NAME = 'test_' + utils.randomword(12) parallel_id = utils.randomword(12) diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index 4838b190f..b6fda478e 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -21,11 +21,6 @@ def register_machine_fixture(): machine = Machine(machine_id=1, description='test-machine') machine.register() -@pytest.fixture(autouse=True, scope="module") -def cleanup_jobs_table(): - yield - DB().query('TRUNCATE TABLE jobs RESTART IDENTITY CASCADE') - # This should be done once per module @pytest.fixture(autouse=True, scope="module", name="build_image") def build_image_fixture(): @@ -45,8 +40,11 @@ def get_job(job_id): return data -@pytest.mark.xdist_group(name="jobs") -def test_no_run_job(): +#@pytest.mark.xdist_group(name="jobs") +@pytest.mark.serial +def test_no_job_to_process(): + # make sure jobs table is empty + DB().query('TRUNCATE TABLE jobs RESTART IDENTITY CASCADE') ps = subprocess.run( ['python3', '../tools/jobs.py', 'run', '--config-override', 'test-config.yml'], check=True, @@ -54,30 +52,19 @@ def test_no_run_job(): 
stdout=subprocess.PIPE, encoding='UTF-8' ) - print(ps.stderr) - assert 'No job to process. Exiting' in ps.stdout,\ - Tests.assertion_info('No job to process. Exiting', ps.stdout) - -@pytest.mark.xdist_group(name="jobs") -def test_no_email_job(): - ps = subprocess.run( - ['python3', '../tools/jobs.py', 'email', '--config-override', 'test-config.yml'], - check=True, - stderr=subprocess.PIPE, - stdout=subprocess.PIPE, - encoding='UTF-8' - ) assert 'No job to process. Exiting' in ps.stdout,\ Tests.assertion_info('No job to process. Exiting', ps.stdout) -@pytest.mark.xdist_group(name="jobs") +#@pytest.mark.xdist_group(name="jobs") +@pytest.mark.serial def test_insert_job(): job_id = Job.insert('Test Name', 'Test URL', 'Test Email', 'Test Branch', 'Test filename', 1) assert job_id is not None job = Job.get_job('run') assert job._state == 'WAITING' -@pytest.mark.xdist_group(name="jobs") +#@pytest.mark.xdist_group(name="jobs") +@pytest.mark.serial def test_simple_run_job(): name = utils.randomword(12) url = 'https://github.com/green-coding-berlin/pytest-dummy-repo' @@ -102,6 +89,7 @@ def test_simple_run_job(): #pylint: disable=unused-variable # for the time being, until I get the mocking to work ## This test doesn't really make sense anymore as is, since we don't have "email jobs" in the same way, ## more that we send an email after a run job is finished. +@pytest.mark.serial def todo_test_simple_email_job(): name = utils.randomword(12) url = 'https://github.com/green-coding-berlin/pytest-dummy-repo' From e41731bcb44cfe890855304ac316419733a537fb Mon Sep 17 00:00:00 2001 From: dan-mm Date: Thu, 18 Jan 2024 04:50:18 +0100 Subject: [PATCH 08/27] update pytest command for workflow --- .github/actions/gmt-pytest/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index a4a2c16ee..3a8f37bf2 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -12,7 +12,7 @@ inputs: tests-command: description: 'The command to run the tests' required: false - default: 'pytest -n auto --dist loadgroup' + default: 'pytest -n auto -m "not serial" || true && pytest -m "serial"' github-token: description: 'pass in your secrets.GITHUB_TOKEN' required: true From d93d5d3ea1bb93d66329fee19431ae1875c58455 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Thu, 18 Jan 2024 05:24:33 +0100 Subject: [PATCH 09/27] update workflow test command; split test action into regular gmt tests and examples directory tests --- .github/actions/gmt-pytest/action.yml | 22 ++++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index 3a8f37bf2..21d5dd43b 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -9,10 +9,9 @@ inputs: description: 'The root directory of the gmt repository' required: false default: '.'
- tests-command: - description: 'The command to run the tests' - required: false - default: 'pytest -n auto -m "not serial" || true && pytest -m "serial"' + run-examples-directory-tests: + description: 'Run tests for examples directory instead of regular gmt tests' + default: false github-token: description: 'pass in your secrets.GITHUB_TOKEN' required: true @@ -86,15 +85,22 @@ runs: run: sleep 10s shell: bash - # - name: Setup upterm session # uses: lhotari/action-upterm@v1 - - name: Run Tests + if: ${{ inputs.run-examples-directory-tests == 'false' }} + shell: bash + working-directory: ${{ inputs.gmt-directory }}/tests + run: | + source ../venv/bin/activate + python3 -m pytest -n auto -m "not serial" -rA >> /tmp/test-results.txt + python3 -m pytest -m "serial" -rA >> /tmp/test-results.txt + + - name: Run Tests (examples directory) + if: ${{ inputs.run-examples-directory-tests == 'true' }} shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | source ../venv/bin/activate - python3 -m ${{ inputs.tests-command }} -rA | tee /tmp/test-results.txt + python3 -m pytest ../../examples-directory/test/smoke_test.py -k "test_all_directories" -rA >> /tmp/test-results.txt - name: Display Results shell: bash From 148517c1432a7b72b965c8a880114a353013163e Mon Sep 17 00:00:00 2001 From: dan-mm Date: Thu, 18 Jan 2024 05:32:54 +0100 Subject: [PATCH 10/27] changed to | tee -a so we can see the output --- .github/actions/gmt-pytest/action.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index 21d5dd43b..f17cebb06 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -91,8 +91,8 @@ runs: working-directory: ${{ inputs.gmt-directory }}/tests run: | source ../venv/bin/activate - python3 -m pytest -n auto -m "not serial" -rA >> /tmp/test-results.txt - python3 -m pytest -m "serial" -rA >> /tmp/test-results.txt + python3 -m pytest -n auto -m "not serial" -rA | tee -a /tmp/test-results.txt + python3 -m pytest -m "serial" -rA | tee -a /tmp/test-results.txt - name: Run Tests (examples directory) if: ${{ inputs.run-examples-directory-tests == 'true' }} shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | source ../venv/bin/activate - python3 -m pytest ../../examples-directory/test/smoke_test.py -k "test_all_directories" -rA >> /tmp/test-results.txt + python3 -m pytest ../../examples-directory/test/smoke_test.py -k "test_all_directories" -rA | tee -a /tmp/test-results.txt - name: Display Results shell: bash From d5a2fb84cb92cbde11c90741c967d470c84ac16d Mon Sep 17 00:00:00 2001 From: dan-mm Date: Mon, 29 Jan 2024 12:09:46 +0100 Subject: [PATCH 11/27] WIP parallelize rewrite to not edit runner.py, but use parallel_id only in test_functions --- runner.py | 2 +- tests/test_functions.py | 38 ++++++++++++++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/runner.py b/runner.py index d9015a757..393e390d8 100755 --- a/runner.py +++ b/runner.py @@ -116,7 +116,7 @@ def __init__(self, self._original_filename = filename self._branch = branch #DMM:MARK - self._tmp_folder = f"/tmp/green-metrics-tool/{parallel_id}" + self._tmp_folder = "/tmp/green-metrics-tool" self._usage_scenario = {} self._architecture = utils.get_architecture() self._sci = {'R_d': None, 'R': 0} diff --git a/tests/test_functions.py b/tests/test_functions.py index 98200fa38..f34c83c38 100644 --- a/tests/test_functions.py +++
b/tests/test_functions.py @@ -1,6 +1,7 @@ import os import re import shutil +import yaml CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -32,6 +33,30 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena with open(usage_scenario_path, 'w', encoding='utf-8') as file: file.write(data) +def parallelize_runner(runner, parallel_id): + runner._tmp_folder = f"/tmp/green-metrics-tool/{parallel_id}" + runner._folder = f"{runner._tmp_folder}/repo" + print(runner._uri) + ## Remember to edit compose file too, because of !include shenanigans + + original_yaml_path = os.path.join(runner._uri, runner._original_filename) + with open(original_yaml_path, 'r', encoding='utf-8') as file: + yaml_data = yaml.safe_load(file) + + print(yaml_data.items()) + #print(yaml.dump(yaml_data)) + # go through yaml_data, and add parallel_id to any value whose key is container + for key, value in yaml_data.items(): + if key == 'containers': + pass + elif key == 'networks': + for network in value: + network['name'] = f"{network}_{parallel_id}" + + # with open(original_yaml_path, 'w') as file: + # yaml.dump(yaml_data, file, default_flow_style=False) + + return runner def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder', branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False, @@ -51,11 +76,13 @@ def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='f RUN_NAME = 'test_' + utils.randomword(12) - return Runner(name=RUN_NAME, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, + runner = Runner(name=RUN_NAME, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, debug_mode=debug_mode, allow_unsafe=allow_unsafe, no_file_cleanup=no_file_cleanup, skip_unsafe=skip_unsafe, verbose_provider_boot=verbose_provider_boot, dev_no_build=dev_no_build, - skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics, - parallel_id=parallel_id) + skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics) + + return parallelize_runner(runner, parallel_id) + # This function runs the runner up to and *including* the specified step # remember to catch in try:finally and do cleanup when calling this! 
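The WIP `parallelize_runner` above generates a random `parallel_id` per test via `utils.randomword(12)`. An alternative worth noting, sketched here and not part of this patch series, would be to derive the id from pytest-xdist itself, which exposes the worker name through its `worker_id` fixture ('gw0', 'gw1', ... per worker, or 'master' in a plain pytest run); the fixture name `parallel_id` below is purely illustrative:

    import pytest

    # Hypothetical conftest.py sketch, assuming pytest-xdist is installed:
    # reuse the xdist worker name as the isolation id instead of a random word.
    @pytest.fixture
    def parallel_id(worker_id):
        return worker_id

A per-worker id is stable for every test that runs on that worker, so container and network names could collide between successive tests on the same worker unless cleanup is reliable; the random per-test id used throughout these patches sidesteps that, at the cost of more leftover names to clean up.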
@@ -158,7 +185,6 @@ def cleanup(runner): finally: runner.cleanup() # always run cleanup automatically after each run - def assertion_info(expected, actual): return f"Expected: {expected}, Actual: {actual}" @@ -166,3 +192,7 @@ def create_test_file(path): if not os.path.exists(path): os.mkdir(path) Path(f"{path}/test-file").touch() + +# test this file +if __name__ == '__main__': + setup_runner('network_stress.yml', 'compose.yml') From 0cb02f42fdfdb29427dcf77f3d70ff5f85d65444 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Thu, 8 Feb 2024 12:47:30 +0100 Subject: [PATCH 12/27] remove parallel_id from runner; move custom loader and supporting functions to helper files; test_function's setup_runner now parallizes loaded yml files; --- lib/utils.py | 40 ++++++ lib/yml_helpers.py | 44 +++++++ runner.py | 120 +++--------------- .../env_vars_stress_allowed.yml | 1 - tests/data/usage_scenarios/network_stress.yml | 2 +- tests/test_functions.py | 91 +++++++++---- 6 files changed, 168 insertions(+), 130 deletions(-) create mode 100644 lib/yml_helpers.py diff --git a/lib/utils.py b/lib/utils.py index c98230928..e41ca0c78 100644 --- a/lib/utils.py +++ b/lib/utils.py @@ -2,6 +2,8 @@ import string import subprocess import psycopg +import os +from pathlib import Path from lib.db import DB @@ -61,3 +63,41 @@ def get_architecture(): if output == 'darwin': return 'macos' return output + +# This function takes a path and a file and joins them while making sure that no one is trying to escape the +# path with `..`, symbolic links or similar. +# We always return the same error message including the path and file parameter, never `filename` as +# otherwise we might disclose if certain files exist or not. +def join_paths(path, path2, mode='file'): + filename = os.path.realpath(os.path.join(path, path2)) + + # If the original path is a symlink we need to resolve it. + path = os.path.realpath(path) + + # This is a special case in which the file is '.' + if filename == path.rstrip('/'): + return filename + + if not filename.startswith(path): + raise ValueError(f"{path2} must not be in folder above {path}") + + # To double check we also check if it is in the files allow list + + if mode == 'file': + folder_content = [str(item) for item in Path(path).rglob("*") if item.is_file()] + elif mode == 'directory': + folder_content = [str(item) for item in Path(path).rglob("*") if item.is_dir()] + else: + raise RuntimeError(f"Unknown mode supplied for join_paths: {mode}") + + if filename not in folder_content: + raise ValueError(f"{mode.capitalize()} '{path2}' not in '{path}'") + + # Another way to implement this. 
This is checking the third time but we want to be extra secure 👾 + if Path(path).resolve(strict=True) not in Path(path, path2).resolve(strict=True).parents: + raise ValueError(f"{mode.capitalize()} '{path2}' not in folder '{path}'") + + if os.path.exists(filename): + return filename + + raise FileNotFoundError(f"{path2} in {path} not found") diff --git a/lib/yml_helpers.py b/lib/yml_helpers.py new file mode 100644 index 000000000..c7e04cffc --- /dev/null +++ b/lib/yml_helpers.py @@ -0,0 +1,44 @@ +#pylint: disable=too-many-ancestors + +import yaml +import os +from lib import utils + +class Loader(yaml.SafeLoader): + def __init__(self, stream): + # We need to find our own root as the Loader is instantiated in PyYaml + self._root = os.path.split(stream.name)[0] + super().__init__(stream) + + def include(self, node): + # We allow two types of includes + # !include => ScalarNode + # and + # !include => SequenceNode + if isinstance(node, yaml.nodes.ScalarNode): + nodes = [self.construct_scalar(node)] + elif isinstance(node, yaml.nodes.SequenceNode): + nodes = self.construct_sequence(node) + else: + raise ValueError("We don't support Mapping Nodes to date") + + filename = utils.join_paths(self._root, nodes[0], 'file') + + with open(filename, 'r', encoding='utf-8') as f: + # We want to enable a deep search for keys + def recursive_lookup(k, d): + if k in d: + return d[k] + for v in d.values(): + if isinstance(v, dict): + return recursive_lookup(k, v) + return None + + # We can use load here as the Loader extends SafeLoader + if len(nodes) == 1: + # There is no selector specified + return yaml.load(f, Loader) + + return recursive_lookup(nodes[1], yaml.load(f, Loader)) + +Loader.add_constructor('!include', Loader.include) diff --git a/runner.py b/runner.py index 393e390d8..fbd213aec 100755 --- a/runner.py +++ b/runner.py @@ -38,66 +38,23 @@ from lib.global_config import GlobalConfig from lib.notes import Notes from lib import system_checks +from lib.yml_helpers import Loader from tools.machine import Machine def arrows(text): return f"\n\n>>>> {text} <<<<\n\n" -# This function takes a path and a file and joins them while making sure that no one is trying to escape the -# path with `..`, symbolic links or similar. -# We always return the same error message including the path and file parameter, never `filename` as -# otherwise we might disclose if certain files exist or not. -def join_paths(path, path2, mode='file'): - filename = os.path.realpath(os.path.join(path, path2)) - - # If the original path is a symlink we need to resolve it. - path = os.path.realpath(path) - - # This is a special case in which the file is '.' - if filename == path.rstrip('/'): - return filename - - if not filename.startswith(path): - raise ValueError(f"{path2} must not be in folder above {path}") - - # To double check we also check if it is in the files allow list - - if mode == 'file': - folder_content = [str(item) for item in Path(path).rglob("*") if item.is_file()] - elif mode == 'directory': - folder_content = [str(item) for item in Path(path).rglob("*") if item.is_dir()] - else: - raise RuntimeError(f"Unknown mode supplied for join_paths: {mode}") - - if filename not in folder_content: - raise ValueError(f"{mode.capitalize()} '{path2}' not in '{path}'") - - # Another way to implement this. 
This is checking the third time but we want to be extra secure 👾 - if Path(path).resolve(strict=True) not in Path(path, path2).resolve(strict=True).parents: - raise ValueError(f"{mode.capitalize()} '{path2}' not in folder '{path}'") - - if os.path.exists(filename): - return filename - - raise FileNotFoundError(f"{path2} in {path} not found") - - - class Runner: def __init__(self, name, uri, uri_type, filename='usage_scenario.yml', branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False, skip_system_checks=False, skip_unsafe=False, verbose_provider_boot=False, full_docker_prune=False, - dev_no_sleeps=False, dev_no_build=False, dev_no_metrics=False, docker_prune=False, job_id=None, - parallel_id=None): + dev_no_sleeps=False, dev_no_build=False, dev_no_metrics=False, docker_prune=False, job_id=None): if skip_unsafe is True and allow_unsafe is True: raise RuntimeError('Cannot specify both --skip-unsafe and --allow-unsafe') - if parallel_id is None: - parallel_id = random.randint(500000,10000000) - # variables that should not change if you call run multiple times self._name = name self._debugger = DebugHelper(debug_mode) @@ -126,7 +83,6 @@ def __init__(self, self._run_id = None self._commit_hash = None self._commit_timestamp = None - self._parallel_id = parallel_id del self._arguments['self'] # self is not needed and also cannot be serialzed. We remove it @@ -245,47 +201,7 @@ def checkout_repository(self): # Inspiration from https://github.com/tanbro/pyyaml-include which we can't use as it doesn't # do security checking and has no option to select when imported def load_yml_file(self): - #pylint: disable=too-many-ancestors - class Loader(yaml.SafeLoader): - def __init__(self, stream): - # We need to find our own root as the Loader is instantiated in PyYaml - self._root = os.path.split(stream.name)[0] - super().__init__(stream) - - def include(self, node): - # We allow two types of includes - # !include => ScalarNode - # and - # !include => SequenceNode - if isinstance(node, yaml.nodes.ScalarNode): - nodes = [self.construct_scalar(node)] - elif isinstance(node, yaml.nodes.SequenceNode): - nodes = self.construct_sequence(node) - else: - raise ValueError("We don't support Mapping Nodes to date") - - filename = join_paths(self._root, nodes[0], 'file') - - with open(filename, 'r', encoding='utf-8') as f: - # We want to enable a deep search for keys - def recursive_lookup(k, d): - if k in d: - return d[k] - for v in d.values(): - if isinstance(v, dict): - return recursive_lookup(k, v) - return None - - # We can use load here as the Loader extends SafeLoader - if len(nodes) == 1: - # There is no selector specified - return yaml.load(f, Loader) - - return recursive_lookup(nodes[1], yaml.load(f, Loader)) - - Loader.add_constructor('!include', Loader.include) - - usage_scenario_file = join_paths(self._folder, self._original_filename, 'file') + usage_scenario_file = utils.join_paths(self._folder, self._original_filename, 'file') # We set the working folder now to the actual location of the usage_scenario if '/' in self._original_filename: @@ -568,8 +484,8 @@ def build_docker_images(self): self.__notes_helper.add_note({'note': f"Building {service['image']}", 'detail_name': '[NOTES]', 'timestamp': int(time.time_ns() / 1_000)}) # Make sure the context docker file exists and is not trying to escape some root. 
We don't need the returns - context_path = join_paths(self._folder, context, 'directory') - join_paths(context_path, dockerfile, 'file') + context_path = utils.join_paths(self._folder, context, 'directory') + utils.join_paths(context_path, dockerfile, 'file') docker_build_command = ['docker', 'run', '--rm', '-v', f"{self._folder}:/workspace:ro", # this is the folder where the usage_scenario is! @@ -623,7 +539,6 @@ def setup_networks(self): if 'networks' in self._usage_scenario: print(TerminalColors.HEADER, '\nSetting up networks', TerminalColors.ENDC) for network in self._usage_scenario['networks']: - network = f"{network}_{self._parallel_id}" print('Creating network: ', network) # remove first if present to not get error, but do not make check=True, as this would lead to inf. loop subprocess.run(['docker', 'network', 'rm', network], stderr=subprocess.DEVNULL, check=False) @@ -676,12 +591,11 @@ def setup_services(self): # Check if there are service dependencies defined with 'depends_on'. # If so, change the order of the services accordingly. services_ordered = self.order_services(services) - #DMM:MARK for service_name, service in services_ordered.items(): if 'container_name' in service: - container_name = f"{service['container_name']}_{self._parallel_id}" + container_name = f"{service['container_name']}" else: - container_name = f"{service_name}_{self._parallel_id}" + container_name = f"{service_name}" print(TerminalColors.HEADER, '\nSetting up container: ', container_name, TerminalColors.ENDC) @@ -815,7 +729,6 @@ def setup_services(self): if 'networks' in service: for network in service['networks']: - network = f"{network}_{self._parallel_id}" docker_run_string.append('--net') docker_run_string.append(network) elif self.__join_default_network: @@ -864,21 +777,20 @@ def setup_services(self): # In the future we want to implement an health check to know if dependent containers are actually ready. 
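Since the `!include` loader now lives in lib/yml_helpers.py instead of inside `load_yml_file`, any caller can resolve includes the same way the runner does. A minimal usage sketch (the file name is illustrative):

    import yaml
    from lib.yml_helpers import Loader

    # Loader resolves `!include other.yml` (or `!include [other.yml, key]` to
    # pick a single key) relative to the folder of the file being loaded;
    # utils.join_paths rejects includes that escape that folder.
    with open('usage_scenario.yml', 'r', encoding='utf-8') as f:
        scenario = yaml.load(f, Loader=Loader)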
if 'depends_on' in service: for dependent_container in service['depends_on']: - dependent_container_name = f"{dependent_container}_{self._parallel_id}" - print(f"Waiting for dependent container {dependent_container_name}") + print(f"Waiting for dependent container {dependent_container}") time_waited = 0 state = '' health = 'healthy' # default because some containers have no health max_waiting_time = config['measurement']['boot']['wait_time_dependencies'] while time_waited < max_waiting_time: status_output = subprocess.check_output( - ["docker", "container", "inspect", "-f", "{{.State.Status}}", dependent_container_name], + ["docker", "container", "inspect", "-f", "{{.State.Status}}", dependent_container], stderr=subprocess.STDOUT, encoding='UTF-8', ) state = status_output.strip() - print(f"State of container '{dependent_container_name}': {state}") + print(f"State of container '{dependent_container}': {state}") if isinstance(service['depends_on'], dict) \ and 'condition' in service['depends_on'][dependent_container]: @@ -886,7 +798,7 @@ def setup_services(self): condition = service['depends_on'][dependent_container]['condition'] if condition == 'service_healthy': ps = subprocess.run( - ["docker", "container", "inspect", "-f", "{{.State.Health.Status}}", dependent_container_name], + ["docker", "container", "inspect", "-f", "{{.State.Health.Status}}", dependent_container], check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, # put both in one stream @@ -894,10 +806,10 @@ def setup_services(self): ) health = ps.stdout.strip() if ps.returncode != 0 or health == '': - raise RuntimeError(f"Health check for dependent_container '{dependent_container_name}' was requested, but container has no healthcheck implemented! (Output was: {health})") + raise RuntimeError(f"Health check for dependent_container '{dependent_container}' was requested, but container has no healthcheck implemented! (Output was: {health})") if health == 'unhealthy': raise RuntimeError('ontainer healthcheck failed terminally with status "unhealthy")') - print(f"Health of container '{dependent_container_name}': {health}") + print(f"Health of container '{dependent_container}': {health}") elif condition == 'service_started': pass else: @@ -911,9 +823,9 @@ def setup_services(self): time_waited += 1 if state != 'running': - raise RuntimeError(f"Dependent container '{dependent_container_name}' of '{container_name}' is not running but {state} after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.") + raise RuntimeError(f"Dependent container '{dependent_container}' of '{container_name}' is not running but {state} after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.") if health != 'healthy': - raise RuntimeError(f"Dependent container '{dependent_container_name}' of '{container_name}' is not healthy but '{health}' after waiting for {time_waited} sec! Consider checking your service configuration, the entrypoint of the container or the logs of the container.") + raise RuntimeError(f"Dependent container '{dependent_container}' of '{container_name}' is not healthy but '{health}' after waiting for {time_waited} sec! 
Consider checking your service configuration, the entrypoint of the container or the logs of the container.") if 'command' in service: # must come last for cmd in service['command'].split(): @@ -1073,8 +985,6 @@ def run_flows(self): self.start_phase(el['name'].replace('[', '').replace(']',''), transition=False) - #DMM:MARK ['container'] - el['container'] = f"{el['container']}_{self._parallel_id}" for inner_el in el['commands']: if 'note' in inner_el: self.__notes_helper.add_note({'note': inner_el['note'], 'detail_name': el['container'], 'timestamp': int(time.time_ns() / 1_000)}) diff --git a/tests/data/usage_scenarios/env_vars_stress_allowed.yml b/tests/data/usage_scenarios/env_vars_stress_allowed.yml index e82aa8952..2aadd59b5 100644 --- a/tests/data/usage_scenarios/env_vars_stress_allowed.yml +++ b/tests/data/usage_scenarios/env_vars_stress_allowed.yml @@ -2,7 +2,6 @@ name: Test Stress author: Dan Mateas description: test -description: test services: test-container: diff --git a/tests/data/usage_scenarios/network_stress.yml b/tests/data/usage_scenarios/network_stress.yml index 9f90b4e9a..7251a22e7 100644 --- a/tests/data/usage_scenarios/network_stress.yml +++ b/tests/data/usage_scenarios/network_stress.yml @@ -6,7 +6,7 @@ description: test networks: gmt-test-network: - + services: test-container: type: container diff --git a/tests/test_functions.py b/tests/test_functions.py index f34c83c38..1e4bfe537 100644 --- a/tests/test_functions.py +++ b/tests/test_functions.py @@ -8,6 +8,7 @@ from pathlib import Path from lib.global_config import GlobalConfig +from lib.yml_helpers import Loader from lib import utils from runner import Runner @@ -36,27 +37,61 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena def parallelize_runner(runner, parallel_id): runner._tmp_folder = f"/tmp/green-metrics-tool/{parallel_id}" runner._folder = f"{runner._tmp_folder}/repo" - print(runner._uri) - ## Remember to edit compose file too, because of !include shenanigans - - original_yaml_path = os.path.join(runner._uri, runner._original_filename) - with open(original_yaml_path, 'r', encoding='utf-8') as file: - yaml_data = yaml.safe_load(file) - - print(yaml_data.items()) - #print(yaml.dump(yaml_data)) - # go through yaml_data, and add parallel_id to any value whose key is container - for key, value in yaml_data.items(): - if key == 'containers': - pass - elif key == 'networks': - for network in value: - network['name'] = f"{network}_{parallel_id}" - - # with open(original_yaml_path, 'w') as file: - # yaml.dump(yaml_data, file, default_flow_style=False) - return runner +def edit_yml_with_id(yml_path, parallel_id): + with open(yml_path, 'r', encoding='utf-8') as fp: + yml_data = yaml.load(fp, Loader=Loader) + + # Update services + services_copy = dict(yml_data.get('services', {})) + for service_name, service_info in services_copy.items(): + new_service_name = f"{service_name}_{parallel_id}" + yml_data['services'][new_service_name] = service_info + del yml_data['services'][service_name] + + # Update networks within service + service_networks = service_info.get('networks') + if service_networks: + if isinstance(service_networks, list): + service_info['networks'] = [f"{network}_{parallel_id}" for network in service_networks] + elif isinstance(service_networks, dict): + service_info['networks'] = {f"{key}_{parallel_id}": value for key, value in service_networks.items()} + + if 'container_name' in service_info: + service_info['container_name'] = 
f"{service_info['container_name']}_{parallel_id}" + + if 'depends_on' in service_info: + service_info['depends_on'] = [f"{dep}_{parallel_id}" for dep in service_info['depends_on']] + + # top level networks + networks = yml_data.get('networks') + if networks: + if isinstance(networks, list): + yml_data['networks'] = [f"{network}_{parallel_id}" for network in networks] + elif isinstance(networks, dict): + yml_data['networks'] = {f"{key}_{parallel_id}": value for key, value in networks.items()} + + # Update container names in the flow section + for item in yml_data.get('flow', []): + if 'container' in item: + item['container'] = f"{item['container']}_{parallel_id}" + + # Save the updated YAML file + with open(yml_path, 'w', encoding='utf-8') as fp: + yaml.dump(yml_data, fp) + + + +def parallelize_files(proj_dir, usage_scenario_file, docker_compose='compose.yml', parallel_id=1234): + if docker_compose is None: + docker_compose = 'compose.yml' + usage_scenario_path = os.path.join(proj_dir, usage_scenario_file) + docker_compose_path = os.path.join(proj_dir, docker_compose) + + # need to do docker compose first, in case its loaded by the usage_scenario + edit_yml_with_id(docker_compose_path, parallel_id) + edit_yml_with_id(usage_scenario_path, parallel_id) + def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder', branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False, @@ -68,11 +103,19 @@ def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='f else: docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/compose.yml') - if uri == 'default': + + if uri_type == 'folder': if dir_name is None: dir_name = utils.randomword(12) make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) uri = os.path.join(CURRENT_DIR, 'tmp/', dir_name) + parallelize_files(uri, usage_scenario, docker_compose, parallel_id) + elif uri_type == 'URL': + if uri[0:8] != 'https://' and uri[0:7] != 'http://': + raise ValueError("Invalid uri for URL") + else: + raise ValueError("Invalid uri_type") + RUN_NAME = 'test_' + utils.randomword(12) @@ -81,7 +124,9 @@ def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='f skip_unsafe=skip_unsafe, verbose_provider_boot=verbose_provider_boot, dev_no_build=dev_no_build, skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics) - return parallelize_runner(runner, parallel_id) + parallelize_runner(runner, parallel_id) + + return runner # This function runs the runner up to and *including* the specified step @@ -195,4 +240,4 @@ def create_test_file(path): # test this file if __name__ == '__main__': - setup_runner('network_stress.yml', 'compose.yml') + setup_runner('import_error.yml', parallel_id=123) From fa3e70822ca6e39dd1574cc47edee36279844845 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Thu, 8 Feb 2024 18:44:49 +0100 Subject: [PATCH 13/27] - updated pytest runstring; setup tests to use new test_functions with parallelization correctly; gmt tmp folders now have parallel id pre-pended, not appended; fix issue with yml dumping during parallelization where key order was not preserved; better default parameters for setup_runner; added flags for setup_runner to fine-grain how it is used --- .../usage_scenarios/stress_application.yml | 40 ++++ tests/run-tests.sh | 2 +- tests/smoke_test.py | 8 +- tests/stress-application/usage_scenario.yml | 1 - tests/test_functions.py | 55 +++--- 
tests/test_runner.py | 1 + tests/test_usage_scenario.py | 28 ++- tests/test_volume_loading.py | 172 +++++++++++------- tests/test_yml_parsing.py | 11 +- 9 files changed, 211 insertions(+), 107 deletions(-) create mode 100644 tests/data/usage_scenarios/stress_application.yml diff --git a/tests/data/usage_scenarios/stress_application.yml b/tests/data/usage_scenarios/stress_application.yml new file mode 100644 index 000000000..0cbe010e3 --- /dev/null +++ b/tests/data/usage_scenarios/stress_application.yml @@ -0,0 +1,40 @@ +--- +# Important +# Please remember that any structural change in this file should +# also be reflected in the simple example we provide in the documentation: +# https://docs.green-coding.io/docs/measuring/measuring-locally/ + + +name: Stress Container One Core 5 Seconds +author: Arne Tarara +description: test +description: test + +networks: + network-for-pytests: + +services: + ubuntu-stress: + type: container + image: gcb_stress + networks: + - network-for-pytests + build: + context: . + dockerfile: Dockerfile + + ubuntu-stress-2: + type: container + image: gcb_stress # this will reuse the image earlier built + networks: + - network-for-pytests + +flow: + - name: Stress + container: ubuntu-stress + commands: +# Alpine does not have stress, so we use stress-ng +# We need the -q flag because otherwise it will write debug to STDERR + - type: console + command: stress-ng -c 1 -t 1 -q + note: Starting Stress diff --git a/tests/run-tests.sh b/tests/run-tests.sh index a32f9e139..95696e1d6 100755 --- a/tests/run-tests.sh +++ b/tests/run-tests.sh @@ -3,7 +3,7 @@ echo "Starting test containers..." ./start-test-containers.sh &>/dev/null & sleep 2 echo "Running pytest..." -pytest -n auto -m "not serial" +pytest -n auto -m "not serial" --dist loadgroup pytest -m "serial" echo "Stopping test containers..."
./stop-test-containers.sh &>/dev/null & diff --git a/tests/smoke_test.py b/tests/smoke_test.py index 27c6ef8e0..6b703d973 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -2,6 +2,7 @@ import os import subprocess import re +import pytest CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -10,7 +11,7 @@ from lib.db import DB from lib import utils from lib.global_config import GlobalConfig -from runner import Runner +from tests import test_functions as Tests run_stderr = None run_stdout = None @@ -28,7 +29,7 @@ def setup_module(): subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) # Run the application - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=False, skip_system_checks=False) + runner = Tests.setup_runner(usage_scenario="stress_application.yml", name=RUN_NAME, uri=uri, uri_type='folder', dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=False, skip_system_checks=False) runner.run() #pylint: disable=global-statement @@ -36,15 +37,18 @@ def setup_module(): run_stderr = err.getvalue() run_stdout = out.getvalue() +@pytest.mark.xdist_group(name="systems_checks") def test_no_errors(): # Assert that there is no std.err output assert run_stderr == '' +@pytest.mark.xdist_group(name="systems_checks") def test_cleanup_success(): # Assert that Cleanup has run assert re.search( 'MEASUREMENT SUCCESSFULLY COMPLETED', run_stdout) +@pytest.mark.xdist_group(name="systems_checks") def test_db_rows_are_written_and_presented(): # for every metric provider, check that there were rows written in the DB with info for that provider # also check (in the same test, to save on a DB call) that the output to STD.OUT diff --git a/tests/stress-application/usage_scenario.yml b/tests/stress-application/usage_scenario.yml index 0cbe010e3..f4e898f5f 100644 --- a/tests/stress-application/usage_scenario.yml +++ b/tests/stress-application/usage_scenario.yml @@ -8,7 +8,6 @@ name: Stress Container One Core 5 Seconds author: Arne Tarara description: test -description: test networks: network-for-pytests: diff --git a/tests/test_functions.py b/tests/test_functions.py index 1e4bfe537..44d77bea1 100644 --- a/tests/test_functions.py +++ b/tests/test_functions.py @@ -19,13 +19,14 @@ def make_proj_dir(dir_name, usage_scenario_path, docker_compose_path=None): if not os.path.exists('tmp/' + dir_name): os.mkdir('tmp/' + dir_name) - shutil.copy2(usage_scenario_path, os.path.join(CURRENT_DIR, 'tmp' ,dir_name)) + dir_path = os.path.join(CURRENT_DIR, 'tmp' ,dir_name) + shutil.copy2(usage_scenario_path, dir_path) # copy over compose.yml and Dockerfile (from stress for now) if docker_compose_path is not None: shutil.copy2(docker_compose_path, os.path.join(CURRENT_DIR, 'tmp' ,dir_name)) dockerfile = os.path.join(CURRENT_DIR, 'stress-application/Dockerfile') shutil.copy2(dockerfile, os.path.join(CURRENT_DIR, 'tmp' ,dir_name)) - return dir_name + return dir_path def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filename): with open(usage_scenario_path, 'r', encoding='utf-8') as file: @@ -34,8 +35,8 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena with open(usage_scenario_path, 'w', encoding='utf-8') as file: file.write(data) -def parallelize_runner(runner, parallel_id): - runner._tmp_folder = f"/tmp/green-metrics-tool/{parallel_id}" +def parallelize_runner_folders(runner, parallel_id): + runner._tmp_folder = f"/tmp/gmt_tests_{parallel_id}/green-metrics-tool/" 
runner._folder = f"{runner._tmp_folder}/repo" def edit_yml_with_id(yml_path, parallel_id): @@ -78,11 +79,11 @@ def edit_yml_with_id(yml_path, parallel_id): # Save the updated YAML file with open(yml_path, 'w', encoding='utf-8') as fp: - yaml.dump(yml_data, fp) + yaml.dump(yml_data, fp, sort_keys=False) #sort_keys=False preserves the original order - - -def parallelize_files(proj_dir, usage_scenario_file, docker_compose='compose.yml', parallel_id=1234): +def parallelize_files(proj_dir, usage_scenario_file, docker_compose='compose.yml', parallel_id=None): + if parallel_id is None: + parallel_id = utils.randomword(12) if docker_compose is None: docker_compose = 'compose.yml' usage_scenario_path = os.path.join(proj_dir, usage_scenario_file) @@ -93,38 +94,46 @@ def parallelize_files(proj_dir, usage_scenario_file, docker_compose='compose.yml edit_yml_with_id(usage_scenario_path, parallel_id) -def setup_runner(usage_scenario, docker_compose=None, uri='default', uri_type='folder', branch=None, - debug_mode=False, allow_unsafe=False, no_file_cleanup=False, +def setup_runner(name=None, usage_scenario="usage_scenario.yml", docker_compose=None, uri='default', + uri_type='folder', branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False, skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=False, skip_system_checks=True, - dev_no_sleeps=True, dev_no_metrics=True, parallel_id=None): - usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario) - if docker_compose is not None: - docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose) - else: - docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/compose.yml') + dev_no_sleeps=True, dev_no_metrics=True, parallel_id=None, create_tmp_directory=True, do_parallelize_files=True): + if parallel_id is None: + parallel_id = utils.randomword(12) + # parallelization of files only for uri_type folders, so far + # because url type means we are checking out a repo, and that happens already too late if uri_type == 'folder': if dir_name is None: - dir_name = utils.randomword(12) - make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + dir_name = parallel_id + + if create_tmp_directory: + if docker_compose is not None: + docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose) + else: + docker_compose_path = os.path.join(CURRENT_DIR, 'data/docker-compose-files/compose.yml') + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario) + make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + uri = os.path.join(CURRENT_DIR, 'tmp/', dir_name) - parallelize_files(uri, usage_scenario, docker_compose, parallel_id) + if do_parallelize_files: + parallelize_files(uri, usage_scenario, docker_compose, parallel_id) elif uri_type == 'URL': if uri[0:8] != 'https://' and uri[0:7] != 'http://': raise ValueError("Invalid uri for URL") else: raise ValueError("Invalid uri_type") + if name is None: + name = f'test_{parallel_id}' - RUN_NAME = 'test_' + utils.randomword(12) - - runner = Runner(name=RUN_NAME, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, + runner = Runner(name=name, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, debug_mode=debug_mode, allow_unsafe=allow_unsafe, no_file_cleanup=no_file_cleanup, skip_unsafe=skip_unsafe, 
verbose_provider_boot=verbose_provider_boot, dev_no_build=dev_no_build, skip_system_checks=skip_system_checks, dev_no_sleeps=dev_no_sleeps, dev_no_metrics=dev_no_metrics) - parallelize_runner(runner, parallel_id) + parallelize_runner_folders(runner, parallel_id) return runner diff --git a/tests/test_runner.py b/tests/test_runner.py index 97d368463..2d6bac1b1 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -35,6 +35,7 @@ def test_check_system(skip_system_checks, expectation): del GlobalConfig().config['measurement']['metric-providers']['common']['psu.energy.ac.foo.machine.provider.SomeProvider'] del GlobalConfig().config['measurement']['metric-providers']['common']['psu.energy.ac.bar.machine.provider.SomeOtherProvider'] +@pytest.mark.xdist_group(name="systems_checks") def test_reporters_still_running(): if GlobalConfig().config['measurement']['metric-providers']['linux'] is None: GlobalConfig().config['measurement']['metric-providers']['linux'] = {} diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index f443edbac..7ce8e1d01 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -339,25 +339,28 @@ def test_depends_on_error_not_running(): Tests.assertion_info(f"test-container-2_{parallel_id} is not running", str(e.value)) def test_depends_on_error_cyclic_dependency(): - runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id=utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - - assert "Cycle found in depends_on definition with service 'test-container-1'" in str(e.value) , \ - Tests.assertion_info("cycle in depends_on with test-container-1", str(e.value)) + container_name=f"test-container-1_{parallel_id}" + assert f"Cycle found in depends_on definition with service '{container_name}'" in str(e.value) , \ + Tests.assertion_info(f"cycle in depends_on with {container_name}", str(e.value)) def test_depends_on_error_unsupported_condition(): - runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + parallel_id = utils.randomword(12) + runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - message = "Unsupported condition in healthcheck for service \'test-container-1\': service_completed_successfully" + container_name=f"test-container-1_{parallel_id}" + message = f"Unsupported condition in healthcheck for service \'{container_name}\': service_completed_successfully" assert message in str(e.value) , \ Tests.assertion_info(message, str(e.value)) @@ -528,6 +531,7 @@ def test_uri_local_dir_missing(): Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) # basic positive case +@pytest.mark.serial def test_uri_github_repo(): uri = 'https://github.com/green-coding-berlin/pytest-dummy-repo' RUN_NAME = 'test_' + utils.randomword(12) @@ -559,6 +563,7 @@ def test_uri_local_branch(): # basic positive case, branch prepped ahead of time # this branch has a different usage_scenario 
file name - basic_stress # that makes sure that it really is pulling a different branch +@pytest.mark.serial def test_uri_github_repo_branch(): uri = 'https://github.com/green-coding-berlin/pytest-dummy-repo' RUN_NAME = 'test_' + utils.randomword(12) @@ -580,6 +585,7 @@ def test_uri_github_repo_branch(): # give incorrect branch name ## Is the expected_exception OK or should it have a more graceful error? ## ATM this is just the default console error of a failed git command +@pytest.mark.serial def test_uri_github_repo_branch_missing(): runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='https://github.com/green-coding-berlin/pytest-dummy-repo', @@ -597,6 +603,7 @@ def test_uri_github_repo_branch_missing(): # # --name NAME # # A name which will be stored to the database to discern this run from others +@pytest.mark.serial def test_name_is_in_db(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) RUN_NAME = 'test_' + utils.randomword(12) @@ -614,6 +621,7 @@ def test_name_is_in_db(): # --filename FILENAME # An optional alternative filename if you do not want to use "usage_scenario.yml" # basic positive case +@pytest.mark.serial def test_different_filename(): usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', 'basic_stress.yml') dir_name = utils.randomword(12) @@ -654,6 +662,7 @@ def test_different_filename_missing(): # --no-file-cleanup # Do not delete files in /tmp/green-metrics-tool +@pytest.mark.serial def test_no_file_cleanup(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) RUN_NAME = 'test_' + utils.randomword(12) @@ -671,10 +680,11 @@ def test_no_file_cleanup(): #pylint: disable=unused-variable def test_skip_and_allow_unsafe_both_true(): with pytest.raises(RuntimeError) as e: - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True, dev_no_build=True) expected_exception = 'Cannot specify both --skip-unsafe and --allow-unsafe' assert str(e.value) == expected_exception, Tests.assertion_info('', str(e.value)) +@pytest.mark.serial def test_debug(monkeypatch): monkeypatch.setattr('sys.stdin', io.StringIO('Enter')) uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) @@ -697,7 +707,7 @@ def test_debug(monkeypatch): # can check for this note in the DB and the notes are about 2s apart def test_read_detached_process_no_exit(): - runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml', dev_no_build=True) out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err): @@ -711,7 +721,7 @@ def test_read_detached_process_no_exit(): Tests.assertion_info('NOT successful run completed', out.getvalue()) def test_read_detached_process_after_exit(): - runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml', dev_no_build=True) out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err): diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index 2d0c2da3a..7b25704ae 100644 --- a/tests/test_volume_loading.py +++ 
b/tests/test_volume_loading.py @@ -10,10 +10,8 @@ import pytest from tests import test_functions as Tests - from lib import utils from lib.global_config import GlobalConfig -from runner import Runner GlobalConfig().override_config(config_name='test-config.yml') @@ -31,9 +29,19 @@ def check_if_container_running(container_name): def test_volume_load_no_escape(): parallel_id = utils.randomword(12) - tmp_dir = os.path.join(CURRENT_DIR, 'tmp', parallel_id, 'basic_stress_w_import.yml') - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_etc_passwords.yml', dir_name=parallel_id, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) - Tests.replace_include_in_usage_scenario(tmp_dir, 'volume_load_etc_passwords.yml') + + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_etc_passwords.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) + + Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, 'volume_load_etc_passwords.yml') + + runner = Tests.setup_runner( usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) try: with pytest.raises(RuntimeError) as e: @@ -42,23 +50,13 @@ def test_volume_load_no_escape(): container_running = check_if_container_running(f"test-container_{parallel_id}") runner.cleanup() - expected_error = 'Service \'test-container\' volume path (/etc/passwd) is outside allowed folder:' + container_name = f'test-container_{parallel_id}' + expected_error = f'Service \'{container_name}\' volume path (/etc/passwd) is outside allowed folder:' assert str(e.value).startswith(expected_error), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info('test-container stopped', 'test-container was still running!') - -def create_tmp_dir(): - tmp_dir_name = utils.randomword(12) - if not os.path.exists(os.path.join(CURRENT_DIR, 'tmp/')): - os.mkdir(os.path.join(CURRENT_DIR, 'tmp/')) - os.mkdir('tmp/' + tmp_dir_name) - tmp_dir = os.path.join(CURRENT_DIR, f'tmp/{tmp_dir_name}') - return tmp_dir, tmp_dir_name - -def copy_compose_and_edit_directory(compose_file, tmp_dir): - tmp_compose_file = os.path.join(tmp_dir, 'docker-compose.yml') - shutil.copyfile( - os.path.join(CURRENT_DIR, f'data/docker-compose-files/{compose_file}'), - tmp_compose_file) + assert container_running is False, Tests.assertion_info(f'{container_name} stopped', f'{container_name} was still running!') + +def edit_compose_file(compose_file, tmp_dir): + tmp_compose_file = os.path.join(tmp_dir, compose_file) #regex replace CURRENT_DIR in docker-compose.yml with temp proj directory where test-file exists with open(tmp_compose_file, 'r', encoding='utf-8') as file: @@ -68,16 +66,21 @@ def copy_compose_and_edit_directory(compose_file, tmp_dir): file.write(data) def test_load_files_from_within_gmt(): - tmp_dir, tmp_dir_name = create_tmp_dir() - Tests.create_test_file(tmp_dir) + parallel_id = utils.randomword(12) - # copy compose file over so that we can edit it safely - copy_compose_and_edit_directory('volume_load_within_proj.yml', tmp_dir) + 
usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_within_proj.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) - # setup runner and run test - parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) - Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') + dir_path = Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + edit_compose_file(docker_compose_file, dir_path) + Tests.create_test_file(dir_path) + + runner = Tests.setup_runner(usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) try: Tests.run_until(runner, 'setup_services') @@ -97,56 +100,81 @@ def test_load_files_from_within_gmt(): assert "File mounted" in out, Tests.assertion_info('/tmp/test-file mounted', f"out: {out} | err: {err}") def test_symlinks_should_fail(): - tmp_dir, tmp_dir_name = create_tmp_dir() - # make a symlink to /etc/passwords in tmp_dir - symlink = os.path.join(tmp_dir, 'symlink') - os.symlink('/etc/passwd', os.path.join(tmp_dir, 'symlink')) + parallel_id = utils.randomword(12) - copy_compose_and_edit_directory('volume_load_symlinks_negative.yml', tmp_dir) + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_symlinks_negative.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) - parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) - Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') + dir_path = Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + edit_compose_file(docker_compose_file, dir_path) + + # make a symlink to /etc/passwords in tmp_dir + symlink = os.path.join(dir_path, 'symlink') + os.symlink('/etc/passwd', os.path.join(dir_path, 'symlink')) + runner = Tests.setup_runner( usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) + + container_name = f'test-container_{parallel_id}' try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running(f"test-container_{parallel_id}") + container_running = check_if_container_running(container_name) runner.cleanup() - expected_error = f"Service 'test-container' volume path ({symlink}) is outside allowed folder:" + expected_error = f"Service '{container_name}' 
volume path ({symlink}) is outside allowed folder:" assert str(e.value).startswith(expected_error), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info(f"test-container_{parallel_id} stopped", f"test-container_{parallel_id} was still running!") + assert container_running is False, Tests.assertion_info(f"{container_name} stopped", f"{container_name} was still running!") def test_non_bind_mounts_should_fail(): - tmp_dir_name = create_tmp_dir()[1] - tmp_dir_usage = os.path.join(CURRENT_DIR, 'tmp', tmp_dir_name, 'basic_stress_w_import.yml') - parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', docker_compose='volume_load_non_bind_mounts.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) - Tests.replace_include_in_usage_scenario(tmp_dir_usage, 'volume_load_non_bind_mounts.yml') + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_non_bind_mounts.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) + + Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + + runner = Tests.setup_runner(usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, + parallel_id=parallel_id, create_tmp_directory=False) + + container_name=f'test-container_{parallel_id}' try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running(f"test-container_{parallel_id}") + container_running = check_if_container_running(container_name) runner.cleanup() expected_error = 'volume path does not exist' assert expected_error in str(e.value), Tests.assertion_info(expected_error, str(e.value)) - assert container_running is False, Tests.assertion_info(f"test-container_{parallel_id} stopped", f"test-container_{parallel_id} was still running!") + assert container_running is False, Tests.assertion_info(f"{container_name} stopped", f"{container_name} was still running!") def test_load_volume_references(): - tmp_dir, tmp_dir_name = create_tmp_dir() - Tests.create_test_file(tmp_dir) + parallel_id = utils.randomword(12) - copy_compose_and_edit_directory('volume_load_references.yml', tmp_dir) + usage_scenario_file="basic_stress_w_import.yml" + usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario_file) + docker_compose_file="volume_load_references.yml" + docker_compose_path=os.path.join(CURRENT_DIR, 'data/docker-compose-files/', docker_compose_file) - parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='basic_stress_w_import.yml', dir_name=tmp_dir_name, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) - Tests.replace_include_in_usage_scenario(os.path.join(tmp_dir, 'basic_stress_w_import.yml'), 'docker-compose.yml') + dir_path = Tests.make_proj_dir(dir_name=parallel_id, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) + tmp_usage_scenario = os.path.join(CURRENT_DIR, 'tmp', parallel_id, usage_scenario_file) + 
Tests.replace_include_in_usage_scenario(tmp_usage_scenario, docker_compose_file) + edit_compose_file(docker_compose_file, dir_path) + + Tests.create_test_file(dir_path) + runner = Tests.setup_runner( + usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, dir_name=parallel_id, + parallel_id=parallel_id, create_tmp_directory=False) try: Tests.run_until(runner, 'setup_services') @@ -165,11 +193,27 @@ def test_load_volume_references(): Tests.cleanup(runner) assert "File mounted" in out, Tests.assertion_info('/tmp/test-file mounted', f"out: {out} | err: {err}") +def prepare_subdir_tmp_directory(parallel_id): + test_case_path=os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') + tmp_dir_path=os.path.join(CURRENT_DIR, 'tmp', parallel_id) + shutil.copytree(test_case_path, tmp_dir_path) + + usage_scenario_path=os.path.join(tmp_dir_path, 'usage_scenario.yml') + compose_yaml_path=os.path.join(tmp_dir_path, 'compose.yaml') + subdir_usage_scenario_path=os.path.join(tmp_dir_path, 'subdir/', 'usage_scenario_subdir.yml') + subdir2_usage_scenario_path=os.path.join(tmp_dir_path, 'subdir/subdir2', 'usage_scenario_subdir2.yml') + + Tests.edit_yml_with_id(usage_scenario_path, parallel_id) + Tests.edit_yml_with_id(compose_yaml_path, parallel_id) + Tests.edit_yml_with_id(subdir_usage_scenario_path, parallel_id) + Tests.edit_yml_with_id(subdir2_usage_scenario_path, parallel_id) + + return tmp_dir_path + def test_volume_loading_subdirectories_root(): - uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') - RUN_NAME = 'test_' + utils.randomword(12) parallel_id = utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) + prepare_subdir_tmp_directory(parallel_id) + runner = Tests.setup_runner(do_parallelize_files=False, parallel_id=parallel_id, create_tmp_directory=False) out = io.StringIO() err = io.StringIO() @@ -191,14 +235,14 @@ run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") expect_mounted_testfile = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" assert expect_mounted_testfile in run_stdout, Tests.assertion_info(expect_mounted_testfile, f"expected output not in {run_stdout}") expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-root_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir(): - uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') - RUN_NAME = 'test_' + utils.randomword(12) parallel_id = utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/usage_scenario_subdir.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) + prepare_subdir_tmp_directory(parallel_id) + runner = Tests.setup_runner(usage_scenario='subdir/usage_scenario_subdir.yml', + do_parallelize_files=False, parallel_id=parallel_id, create_tmp_directory=False) out = io.StringIO() err = io.StringIO() @@ -214,11 +258,11 @@ expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") -def HELP_test_volume_loading_subdirectories_subdir2(): - uri = os.path.join(CURRENT_DIR, 'data/test_cases/subdir_volume_loading') - RUN_NAME = 'test_' + utils.randomword(12) +def test_volume_loading_subdirectories_subdir2(): parallel_id = utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename="subdir/subdir2/usage_scenario_subdir2.yml", skip_system_checks=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=False, parallel_id=parallel_id) + prepare_subdir_tmp_directory(parallel_id) + runner = Tests.setup_runner(usage_scenario='subdir/subdir2/usage_scenario_subdir2.yml', + do_parallelize_files=False, parallel_id=parallel_id, create_tmp_directory=False) out = io.StringIO() err = io.StringIO() diff --git a/tests/test_yml_parsing.py b/tests/test_yml_parsing.py index f72378c2c..8e6b977e4 100644 --- a/tests/test_yml_parsing.py +++ b/tests/test_yml_parsing.py @@ -5,18 +5,17 @@ from lib import utils from lib.global_config import GlobalConfig -from runner import Runner +from tests import test_functions as Tests GlobalConfig().override_config(config_name='test-config.yml') class TestYML(unittest.TestCase): def test_includes(self): - test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/') test_root_file = 'import_one_root.yml' name = 'test_' + utils.randomword(12) - runner = Runner(name=name, uri=test_dir, uri_type='folder', filename=test_root_file) + runner = Tests.setup_runner(usage_scenario=test_root_file, name=name, uri_type='folder') runner.checkout_repository() # We need to do this to setup the file paths correctly runner.load_yml_file() @@ -28,11 +27,10 @@ self.assertEqual(result_obj, runner._usage_scenario) def test_(self): - test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/') test_root_file = 'import_two_root.yml' name = 'test_' + utils.randomword(12) - runner = Runner(name=name, uri=test_dir, uri_type='folder', filename=test_root_file) + runner = Tests.setup_runner(usage_scenario=test_root_file, name=name, uri_type='folder') runner.checkout_repository() # We need to do this to setup the file paths correctly runner.load_yml_file() @@ -51,8 +49,7 @@ def test_invalid_path(self): name = 'test_' + utils.randomword(12) - test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/') test_root_file = 'import_error.yml' - runner = Runner(name=name, uri=test_dir, uri_type='folder', filename=test_root_file) + runner = Tests.setup_runner(usage_scenario=test_root_file, name=name, uri_type='folder') runner.checkout_repository() # We need to do this to setup the file paths correctly self.assertRaises(ValueError, runner.load_yml_file) From c160fac05916aa40fef7c87fb62dbc13661069f2 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 9 Feb 2024 15:01:03 +0100 Subject: [PATCH 14/27] smoke tests now use temp directory instead of stress-application directory; '-' instead of '_' for parallel id; depends_on now properly writes yaml in test parallelization; setup_runner doesn't override uri if it's passed in; yml_parsing tests serialized;
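With a hypothetical parallel_id "abc123", a usage_scenario rewritten by edit_yml_with_id now comes out roughly like:

    services:
      test-container-abc123:
        container_name: test-container-abc123
        networks:
          - gmt-test-network-abc123
    flow:
      - name: Stress
        container: test-container-abc123

so container names, networks and flow references are unique per xdist worker and parallel runs no longer collide on the Docker daemon.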
--- runner.py | 2 - tests/smoke_test.py | 10 +++-
tests/test_functions.py | 32 +++++++----- tests/test_usage_scenario.py | 94 ++++++++++++++++++------------------ tests/test_volume_loading.py | 34 ++++++------- tests/test_yml_parsing.py | 16 ++++-- 6 files changed, 103 insertions(+), 85 deletions(-) diff --git a/runner.py b/runner.py index fbd213aec..38df4d885 100755 --- a/runner.py +++ b/runner.py @@ -72,7 +72,6 @@ def __init__(self, self._uri_type = uri_type self._original_filename = filename self._branch = branch - #DMM:MARK self._tmp_folder = "/tmp/green-metrics-tool" self._usage_scenario = {} self._architecture = utils.get_architecture() @@ -280,7 +279,6 @@ def check_running_containers(self): check=True, encoding='UTF-8') for line in result.stdout.splitlines(): for running_container in line.split(','): # if docker container has multiple tags, they will be split by comma, so we only want to - #DMM:MARK for service_name in self._usage_scenario.get('services', {}): if 'container_name' in self._usage_scenario['services'][service_name]: container_name = self._usage_scenario['services'][service_name]['container_name'] diff --git a/tests/smoke_test.py b/tests/smoke_test.py index 6b703d973..59ecb4933 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -3,6 +3,7 @@ import subprocess import re import pytest +import shutil CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -21,15 +22,20 @@ # Runs once per file before any test( #pylint: disable=expression-not-assigned def setup_module(): + parallel_id = utils.randomword(12) + test_case_path=os.path.join(CURRENT_DIR, 'stress-application/') + tmp_dir_path=os.path.join(CURRENT_DIR, 'tmp', parallel_id) + shutil.copytree(test_case_path, tmp_dir_path) + out = io.StringIO() err = io.StringIO() GlobalConfig(config_name='test-config.yml').config with redirect_stdout(out), redirect_stderr(err): - uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) + uri = os.path.abspath(tmp_dir_path) subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) # Run the application - runner = Tests.setup_runner(usage_scenario="stress_application.yml", name=RUN_NAME, uri=uri, uri_type='folder', dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=False, skip_system_checks=False) + runner = Tests.setup_runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=False, skip_system_checks=False, create_tmp_directory=False, parallel_id=parallel_id) runner.run() #pylint: disable=global-statement diff --git a/tests/test_functions.py b/tests/test_functions.py index 44d77bea1..ef6f5b0df 100644 --- a/tests/test_functions.py +++ b/tests/test_functions.py @@ -36,7 +36,7 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena file.write(data) def parallelize_runner_folders(runner, parallel_id): - runner._tmp_folder = f"/tmp/gmt_tests_{parallel_id}/green-metrics-tool/" + runner._tmp_folder = f"/tmp/gmt_tests-{parallel_id}/green-metrics-tool/" runner._folder = f"{runner._tmp_folder}/repo" def edit_yml_with_id(yml_path, parallel_id): @@ -46,7 +46,7 @@ def edit_yml_with_id(yml_path, parallel_id): # Update services services_copy = dict(yml_data.get('services', {})) for service_name, service_info in services_copy.items(): - new_service_name = f"{service_name}_{parallel_id}" + new_service_name = f"{service_name}-{parallel_id}" yml_data['services'][new_service_name] = service_info del yml_data['services'][service_name] @@ -54,28 +54,34 @@ def edit_yml_with_id(yml_path, parallel_id): service_networks = 
service_info.get('networks') if service_networks: if isinstance(service_networks, list): - service_info['networks'] = [f"{network}_{parallel_id}" for network in service_networks] + service_info['networks'] = [f"{network}-{parallel_id}" for network in service_networks] elif isinstance(service_networks, dict): - service_info['networks'] = {f"{key}_{parallel_id}": value for key, value in service_networks.items()} + service_info['networks'] = {f"{key}-{parallel_id}": value for key, value in service_networks.items()} if 'container_name' in service_info: - service_info['container_name'] = f"{service_info['container_name']}_{parallel_id}" + service_info['container_name'] = f"{service_info['container_name']}-{parallel_id}" if 'depends_on' in service_info: - service_info['depends_on'] = [f"{dep}_{parallel_id}" for dep in service_info['depends_on']] + if isinstance(service_info['depends_on'], list): + service_info['depends_on'] = [f"{dep}-{parallel_id}" for dep in service_info['depends_on']] + elif isinstance(service_info['depends_on'], dict): + service_info['depends_on'] = {f"{key}-{parallel_id}": value for key, value in service_info['depends_on'].items()} + else: + service_info['depends_on'] = f"{service_info['depends_on']}-{parallel_id}" + # top level networks networks = yml_data.get('networks') if networks: if isinstance(networks, list): - yml_data['networks'] = [f"{network}_{parallel_id}" for network in networks] + yml_data['networks'] = [f"{network}-{parallel_id}" for network in networks] elif isinstance(networks, dict): - yml_data['networks'] = {f"{key}_{parallel_id}": value for key, value in networks.items()} + yml_data['networks'] = {f"{key}-{parallel_id}": value for key, value in networks.items()} # Update container names in the flow section for item in yml_data.get('flow', []): if 'container' in item: - item['container'] = f"{item['container']}_{parallel_id}" + item['container'] = f"{item['container']}-{parallel_id}" # Save the updated YAML file with open(yml_path, 'w', encoding='utf-8') as fp: @@ -116,9 +122,11 @@ def setup_runner(name=None, usage_scenario="usage_scenario.yml", docker_compose= usage_scenario_path = os.path.join(CURRENT_DIR, 'data/usage_scenarios/', usage_scenario) make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=docker_compose_path) - uri = os.path.join(CURRENT_DIR, 'tmp/', dir_name) + tmp_dir_path = os.path.join(CURRENT_DIR, 'tmp/', dir_name) + if uri == 'default': + uri = tmp_dir_path if do_parallelize_files: - parallelize_files(uri, usage_scenario, docker_compose, parallel_id) + parallelize_files(tmp_dir_path, usage_scenario, docker_compose, parallel_id) elif uri_type == 'URL': if uri[0:8] != 'https://' and uri[0:7] != 'http://': raise ValueError("Invalid uri for URL") @@ -126,7 +134,7 @@ def setup_runner(name=None, usage_scenario="usage_scenario.yml", docker_compose= raise ValueError("Invalid uri_type") if name is None: - name = f'test_{parallel_id}' + name = f'test-{parallel_id}' runner = Runner(name=name, uri=uri, uri_type=uri_type, filename=usage_scenario, branch=branch, debug_mode=debug_mode, allow_unsafe=allow_unsafe, no_file_cleanup=no_file_cleanup, diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index 7ce8e1d01..d67f7ccee 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -46,7 +46,7 @@ def get_env_vars(runner, parallel_id): Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', f"test-container_{parallel_id}", '/bin/sh', + ['docker', 
'exec', f"test-container-{parallel_id}", '/bin/sh', '-c', 'env'], check=True, stderr=subprocess.PIPE, @@ -105,7 +105,7 @@ def get_port_bindings(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'port', f"test-container_{parallel_id}", '9018'], + ['docker', 'port', f"test-container-{parallel_id}", '9018'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -133,7 +133,7 @@ def test_port_bindings_skip_unsafe_true(): # fail the test with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception): _, docker_port_err = get_port_bindings(runner, parallel_id) - expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container_{parallel_id}\n" + expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container-{parallel_id}\n" assert docker_port_err == expected_container_error, \ Tests.assertion_info(f"Container Error: {expected_container_error}", docker_port_err) expected_warning = 'Found ports entry but not running in unsafe mode. Skipping' @@ -145,7 +145,7 @@ def test_port_bindings_no_skip_or_allow(): runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) with pytest.raises(Exception) as e: _, docker_port_err = get_port_bindings(runner, parallel_id) - expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container_{parallel_id}\n" + expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container-{parallel_id}\n" assert docker_port_err == expected_container_error, \ Tests.assertion_info(f"Container Error: {expected_container_error}", docker_port_err) expected_error = 'Found "ports" but neither --skip-unsafe nor --allow-unsafe is set' @@ -166,7 +166,7 @@ def test_setup_commands_one_command(): Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - assert f"Running command: docker exec test-container_{parallel_id} sh -c ps -a" in out.getvalue(), \ + assert f"Running command: docker exec test-container-{parallel_id} sh -c ps -a" in out.getvalue(), \ Tests.assertion_info('stdout message: Running command: docker exec ps -a', out.getvalue()) assert '1 root 0:00 /bin/sh' in out.getvalue(), \ Tests.assertion_info('container stdout showing /bin/sh as process 1', 'different message in container stdout') @@ -183,15 +183,15 @@ def test_setup_commands_multiple_commands(): finally: runner.cleanup() - expected_pattern = re.compile(fr"Running command: docker exec test-container_{parallel_id} echo hello world.*\ + expected_pattern = re.compile(fr"Running command: docker exec test-container-{parallel_id} echo hello world.*\ \s*Stdout: hello world.*\ \s*Stderr:.*\ -\s*Running command: docker exec test-container_{parallel_id} ps -a.*\ +\s*Running command: docker exec test-container-{parallel_id} ps -a.*\ \s*Stdout:\s+PID\s+USER\s+TIME\s+COMMAND.*\ \s*1\s+root\s+\d:\d\d\s+/bin/sh.*\ \s*1\d+\s+root\s+\d:\d\d\s+ps -a.*\ \s*Stderr:.*\ -\s*Running command: docker exec test-container_{parallel_id} echo goodbye world.*\ +\s*Running command: docker exec test-container-{parallel_id} echo goodbye world.*\ \s*Stdout: goodbye world.*\ ", re.MULTILINE) @@ -208,7 +208,7 @@ def get_contents_of_bound_volume(runner, parallel_id): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', f"test-container_{parallel_id}", 'ls', '/tmp/test-data'], + ['docker', 'exec', f"test-container-{parallel_id}", 'ls', 
'/tmp/test-data'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -245,9 +245,9 @@ def test_depends_on_order(): runner.cleanup() # Expected order: test-container-2, test-container-4, test-container-3, test-container-1 - assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-4_{parallel_id}") - assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-3_{parallel_id}") - assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-1_{parallel_id}") + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-4-{parallel_id}") + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-3-{parallel_id}") + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-1-{parallel_id}") def test_depends_on_huge(): @@ -263,67 +263,67 @@ def test_depends_on_huge(): runner.cleanup() # For test-container-20 - assert_order(out.getvalue(), f"test-container-16_{parallel_id}", f"test-container-20_{parallel_id}") - assert_order(out.getvalue(), f"test-container-15_{parallel_id}", f"test-container-20_{parallel_id}") + assert_order(out.getvalue(), f"test-container-16-{parallel_id}", f"test-container-20-{parallel_id}") + assert_order(out.getvalue(), f"test-container-15-{parallel_id}", f"test-container-20-{parallel_id}") # For test-container-19 - assert_order(out.getvalue(), f"test-container-14_{parallel_id}", f"test-container-19_{parallel_id}") - assert_order(out.getvalue(), f"test-container-13_{parallel_id}", f"test-container-19_{parallel_id}") + assert_order(out.getvalue(), f"test-container-14-{parallel_id}", f"test-container-19-{parallel_id}") + assert_order(out.getvalue(), f"test-container-13-{parallel_id}", f"test-container-19-{parallel_id}") # For test-container-18 - assert_order(out.getvalue(), f"test-container-12_{parallel_id}", f"test-container-18_{parallel_id}") - assert_order(out.getvalue(), f"test-container-11_{parallel_id}", f"test-container-18_{parallel_id}") + assert_order(out.getvalue(), f"test-container-12-{parallel_id}", f"test-container-18-{parallel_id}") + assert_order(out.getvalue(), f"test-container-11-{parallel_id}", f"test-container-18-{parallel_id}") # For test-container-17 - assert_order(out.getvalue(), f"test-container-10_{parallel_id}", f"test-container-17_{parallel_id}") - assert_order(out.getvalue(), f"test-container-9_{parallel_id}", f"test-container-17_{parallel_id}") + assert_order(out.getvalue(), f"test-container-10-{parallel_id}", f"test-container-17-{parallel_id}") + assert_order(out.getvalue(), f"test-container-9-{parallel_id}", f"test-container-17-{parallel_id}") # For test-container-16 - assert_order(out.getvalue(), f"test-container-8_{parallel_id}", f"test-container-16_{parallel_id}") - assert_order(out.getvalue(), f"test-container-7_{parallel_id}", f"test-container-16_{parallel_id}") + assert_order(out.getvalue(), f"test-container-8-{parallel_id}", f"test-container-16-{parallel_id}") + assert_order(out.getvalue(), f"test-container-7-{parallel_id}", f"test-container-16-{parallel_id}") # For test-container-15 - assert_order(out.getvalue(), f"test-container-6_{parallel_id}", f"test-container-15_{parallel_id}") - assert_order(out.getvalue(), f"test-container-5_{parallel_id}", f"test-container-15_{parallel_id}") + assert_order(out.getvalue(), f"test-container-6-{parallel_id}", f"test-container-15-{parallel_id}") + assert_order(out.getvalue(), f"test-container-5-{parallel_id}", f"test-container-15-{parallel_id}") # For 
test-container-14 - assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-14_{parallel_id}") + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-14-{parallel_id}") # For test-container-13 - assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-13_{parallel_id}") + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-13-{parallel_id}") # For test-container-12 - assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-12_{parallel_id}") + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-12-{parallel_id}") # For test-container-11 - assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-11_{parallel_id}") + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-11-{parallel_id}") # For test-container-10 - assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-10_{parallel_id}") + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-10-{parallel_id}") # For test-container-9 - assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-9_{parallel_id}") + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-9-{parallel_id}") # For test-container-8 - assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-8_{parallel_id}") + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-8-{parallel_id}") # For test-container-7 - assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-7_{parallel_id}") + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-7-{parallel_id}") # For test-container-6 - assert_order(out.getvalue(), f"test-container-4_{parallel_id}", f"test-container-6_{parallel_id}") + assert_order(out.getvalue(), f"test-container-4-{parallel_id}", f"test-container-6-{parallel_id}") # For test-container-5 - assert_order(out.getvalue(), f"test-container-3_{parallel_id}", f"test-container-5_{parallel_id}") + assert_order(out.getvalue(), f"test-container-3-{parallel_id}", f"test-container-5-{parallel_id}") # For test-container-4 - assert_order(out.getvalue(), f"test-container-2_{parallel_id}", f"test-container-4_{parallel_id}") + assert_order(out.getvalue(), f"test-container-2-{parallel_id}", f"test-container-4-{parallel_id}") # For test-container-3 - assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-3_{parallel_id}") + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-3-{parallel_id}") # For test-container-2 - assert_order(out.getvalue(), f"test-container-1_{parallel_id}", f"test-container-2_{parallel_id}") + assert_order(out.getvalue(), f"test-container-1-{parallel_id}", f"test-container-2-{parallel_id}") def test_depends_on_error_not_running(): @@ -335,8 +335,8 @@ def test_depends_on_error_not_running(): finally: runner.cleanup() - assert f"Dependent container 'test-container-2_{parallel_id}' of 'test-container-1_{parallel_id}' is not running" in str(e.value) , \ - Tests.assertion_info(f"test-container-2_{parallel_id} is not running", str(e.value)) + assert f"Dependent container 'test-container-2-{parallel_id}' of 'test-container-1-{parallel_id}' is not running" in str(e.value) , \ + Tests.assertion_info(f"test-container-2-{parallel_id} is not running", str(e.value)) def test_depends_on_error_cyclic_dependency(): 
parallel_id=utils.randomword(12) @@ -346,7 +346,7 @@ def test_depends_on_error_cyclic_dependency(): Tests.run_until(runner, 'setup_services') finally: runner.cleanup() - container_name=f"test-container-1_{parallel_id}" + container_name=f"test-container-1-{parallel_id}" assert f"Cycle found in depends_on definition with service '{container_name}'" in str(e.value) , \ Tests.assertion_info(f"cycle in depends_on with {container_name}", str(e.value)) @@ -359,7 +359,7 @@ def test_depends_on_error_unsupported_condition(): finally: runner.cleanup() - container_name=f"test-container-1_{parallel_id}" + container_name=f"test-container-1-{parallel_id}" message = f"Unsupported condition in healthcheck for service \'{container_name}\': service_completed_successfully" assert message in str(e.value) , \ Tests.assertion_info(message, str(e.value)) @@ -387,9 +387,9 @@ def test_depends_on_healthcheck(): try: with redirect_stdout(out), redirect_stderr(err): runner.run() - message = f"Health of container \'test-container-2_{parallel_id}\': starting" + message = f"Health of container \'test-container-2-{parallel_id}\': starting" assert message in out.getvalue(), Tests.assertion_info(message, out.getvalue()) - message2 = f"Health of container \'test-container-2_{parallel_id}\': healthy" + message2 = f"Health of container \'test-container-2-{parallel_id}\': healthy" assert message2 in out.getvalue(), Tests.assertion_info(message, out.getvalue()) finally: @@ -405,7 +405,7 @@ def test_depends_on_healthcheck_error_missing(): finally: runner.cleanup() - expected_exception = f"Health check for dependent_container 'test-container-2_{parallel_id}' was requested, but container has no healthcheck implemented!" + expected_exception = f"Health check for dependent_container 'test-container-2-{parallel_id}' was requested, but container has no healthcheck implemented!" assert str(e.value).startswith(expected_exception),\ Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) @@ -467,7 +467,7 @@ def test_container_is_in_network(): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'network', 'inspect', f"gmt-test-network_{parallel_id}"], + ['docker', 'network', 'inspect', f"gmt-test-network-{parallel_id}"], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -476,7 +476,7 @@ def test_container_is_in_network(): inspect = ps.stdout finally: Tests.cleanup(runner) - assert f"test-container_{parallel_id}" in inspect, Tests.assertion_info(f"test-container_{parallel_id}", inspect) + assert f"test-container-{parallel_id}" in inspect, Tests.assertion_info(f"test-container-{parallel_id}", inspect) # command: [str] (optional) # Command to be executed when container is started. 
@@ -488,7 +488,7 @@ def test_cmd_ran(): try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( - ['docker', 'exec', f"test-container_{parallel_id}", 'ps', '-a'], + ['docker', 'exec', f"test-container-{parallel_id}", 'ps', '-a'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index 7b25704ae..47e6de459 100644 --- a/tests/test_volume_loading.py +++ b/tests/test_volume_loading.py @@ -47,10 +47,10 @@ def test_volume_load_no_escape(): with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') finally: - container_running = check_if_container_running(f"test-container_{parallel_id}") + container_running = check_if_container_running(f"test-container-{parallel_id}") runner.cleanup() - container_name = f'test-container_{parallel_id}' + container_name = f'test-container-{parallel_id}' expected_error = f'Service \'{container_name}\' volume path (/etc/passwd) is outside allowed folder:' assert str(e.value).startswith(expected_error), Tests.assertion_info(expected_error, str(e.value)) assert container_running is False, Tests.assertion_info(f'{container_name} stopped', f'{container_name} was still running!') @@ -86,7 +86,7 @@ def test_load_files_from_within_gmt(): Tests.run_until(runner, 'setup_services') # check that the volume was loaded ps = subprocess.run( - ['docker', 'exec', f"test-container_{parallel_id}", '/bin/sh', + ['docker', 'exec', f"test-container-{parallel_id}", '/bin/sh', '-c', 'test -f /tmp/test-file && echo "File mounted"'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -119,7 +119,7 @@ def test_symlinks_should_fail(): runner = Tests.setup_runner( usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, parallel_id=parallel_id, create_tmp_directory=False) - container_name = f'test-container_{parallel_id}' + container_name = f'test-container-{parallel_id}' try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') @@ -146,7 +146,7 @@ def test_non_bind_mounts_should_fail(): runner = Tests.setup_runner(usage_scenario=usage_scenario_file, docker_compose=docker_compose_file, parallel_id=parallel_id, create_tmp_directory=False) - container_name=f'test-container_{parallel_id}' + container_name=f'test-container-{parallel_id}' try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') @@ -180,7 +180,7 @@ def test_load_volume_references(): Tests.run_until(runner, 'setup_services') # check that the volume was loaded ps = subprocess.run( - ['docker', 'exec', f"test-container-2_{parallel_id}", '/bin/sh', + ['docker', 'exec', f"test-container-2-{parallel_id}", '/bin/sh', '-c', 'test -f /tmp/test-file && echo "File mounted"'], stderr=subprocess.PIPE, stdout=subprocess.PIPE, @@ -223,19 +223,19 @@ def test_volume_loading_subdirectories_root(): run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_content_testfile_root = f"stdout from process: ['docker', 'exec', 'test-container-root_{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root'] testfile-root-content" + expect_content_testfile_root = f"stdout from process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root'] testfile-root-content" assert expect_content_testfile_root in run_stdout, Tests.assertion_info(expect_content_testfile_root, f"expected output not in {run_stdout}") - expect_extra_testfile_root = f"stdout from 
process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile-root-content', '/tmp/testfile-root-extra-copied'] testfile-root-content" + assert expect_extra_testfile_root in run_stdout, Tests.assertion_info(expect_extra_testfile_root, f"expected output not in {run_stdout}") - expect_mounted_testfile = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" + expect_mounted_testfile = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile-content', '/tmp/testfile-correctly-mounted'] testfile-content" assert expect_mounted_testfile in run_stdout, Tests.assertion_info(expect_mounted_testfile, f"expected output not in {run_stdout}") - expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-root_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir(): @@ -252,10 +252,10 @@ run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-mounted'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir2(): @@ -272,14 +272,14 @@ run_stdout = out.getvalue() assert run_stderr == '', Tests.assertion_info('stderr empty', f"stderr: {run_stderr}") - expect_mounted_testfile_2 =
f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" + expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, "expected output not in {run_stdout}") - expect_copied_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-copied'] testfile2-content" + expect_copied_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-copied'] testfile2-content" assert expect_copied_testfile_2 in run_stdout, Tests.assertion_info(expect_copied_testfile_2, f"expected output not in {run_stdout}") - expect_copied_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" + expect_copied_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_copied_testfile_3 in run_stdout, Tests.assertion_info(expect_copied_testfile_3, f"expected output not in {run_stdout}") - expect_copied_testfile_4 = f"stdout from process: ['docker', 'exec', 'test-container_{parallel_id}', 'grep', 'testfile4-content', '/tmp/testfile4-correctly-copied'] testfile4-content" + expect_copied_testfile_4 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile4-content', '/tmp/testfile4-correctly-copied'] testfile4-content" assert expect_copied_testfile_4 in run_stdout, Tests.assertion_info(expect_copied_testfile_4, f"expected output not in {run_stdout}") diff --git a/tests/test_yml_parsing.py b/tests/test_yml_parsing.py index 8e6b977e4..127e7229e 100644 --- a/tests/test_yml_parsing.py +++ b/tests/test_yml_parsing.py @@ -1,21 +1,24 @@ import os import unittest +import pytest CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) from lib import utils from lib.global_config import GlobalConfig -from tests import test_functions as Tests +from runner import Runner GlobalConfig().override_config(config_name='test-config.yml') class TestYML(unittest.TestCase): + @pytest.mark.serial # the editing of the yml files makes the result_obj too different, this can be fixed later def test_includes(self): + test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/') test_root_file = 'import_one_root.yml' name = 'test_' + utils.randomword(12) - runner = Tests.setup_runner(usage_scenario=test_root_file, name=name, uri_type='folder') + runner = Runner(name=name, uri=test_dir, uri_type='folder', filename=test_root_file) runner.checkout_repository() # We need to do this to setup the file paths correctly runner.load_yml_file() @@ -26,11 +29,13 @@ def test_includes(self): self.assertEqual(result_obj, runner._usage_scenario) + @pytest.mark.serial # the editing of the yml files makes the result_obj too different, this can be fixed later def test_(self): + test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/') test_root_file = 'import_two_root.yml' name = 'test_' + utils.randomword(12) - runner = Tests.setup_runner(usage_scenario=test_root_file, name=name, uri_type='folder') + runner = 
Runner(name=name, uri=test_dir, uri_type='folder', filename=test_root_file)
 
         runner.checkout_repository() # We need to do this to setup the file paths correctly
         runner.load_yml_file()
@@ -46,10 +51,11 @@ def test_(self):
         print(f"expect: {result_obj}")
         self.assertEqual(result_obj, runner._usage_scenario)
 
-
+    @pytest.mark.serial # the parallelization uses the yml loader, so this test will always fail before the assert
     def test_invalid_path(self):
         name = 'test_' + utils.randomword(12)
+        test_dir = os.path.join(CURRENT_DIR, 'data/usage_scenarios/')
         test_root_file = 'import_error.yml'
-        runner = Tests.setup_runner(usage_scenario=test_root_file, name=name, uri_type='folder')
+        runner = Runner(name=name, uri=test_dir, uri_type='folder', filename=test_root_file)
         runner.checkout_repository() # We need to do this to setup the file paths correctly
         self.assertRaises(ValueError, runner.load_yml_file)

From d5564dca90dff0f3ea71ccefe05a592d4071d41e Mon Sep 17 00:00:00 2001
From: dan-mm
Date: Fri, 9 Feb 2024 15:54:07 +0100
Subject: [PATCH 15/27] parallelize the tmp docker image

---
 runner.py               | 3 ++-
 tests/test_functions.py | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/runner.py b/runner.py
index 38df4d885..bece5c35b 100755
--- a/runner.py
+++ b/runner.py
@@ -82,6 +82,7 @@ def __init__(self,
         self._run_id = None
         self._commit_hash = None
         self._commit_timestamp = None
+        self._tmp_image_name = 'gmt_run_tmp'
 
         del self._arguments['self'] # self is not needed and also cannot be serialzed. We remove it
 
@@ -443,7 +444,7 @@ def clean_image_name(self, name):
         name = re.sub(r'[^A-Za-z0-9_]', '', name)
         # only lowercase letters are allowed for tags
         name = name.lower()
-        name = f"{name}_gmt_run_tmp"
+        name = f"{name}_{self._tmp_image_name}"
         return name
 
     def build_docker_images(self):
diff --git a/tests/test_functions.py b/tests/test_functions.py
index ef6f5b0df..ef8effc68 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -38,6 +38,7 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena
 def parallelize_runner_folders(runner, parallel_id):
     runner._tmp_folder = f"/tmp/gmt_tests-{parallel_id}/green-metrics-tool/"
     runner._folder = f"{runner._tmp_folder}/repo"
+    runner._tmp_image_name = f"gmt_run_tmp_{parallel_id}"
 
 def edit_yml_with_id(yml_path, parallel_id):
     with open(yml_path, 'r', encoding='utf-8') as fp:

From 3f4d7dfe1b95f42364b011373951588410216ddb Mon Sep 17 00:00:00 2001
From: dan-mm
Date: Fri, 9 Feb 2024 16:42:29 +0100
Subject: [PATCH 16/27] serialize failing test

---
 tests/test_volume_loading.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py
index 47e6de459..a9bfc40f5 100644
--- a/tests/test_volume_loading.py
+++ b/tests/test_volume_loading.py
@@ -210,6 +210,9 @@ def prepare_subdir_tmp_directory(parallel_id):
 
     return tmp_dir_path
 
+#TODO: figure out why this isn't working nicely during parallelization
+# its failing in the VM, despite the fact that the stdout does indeed have the expected strings in them
+@pytest.mark.serial
 def test_volume_loading_subdirectories_root():
     parallel_id = utils.randomword(12)
     prepare_subdir_tmp_directory(parallel_id)

From c9f28d5e3afc575181bc0dff6053b6f6e5c51dd8 Mon Sep 17 00:00:00 2001
From: dan-mm
Date: Fri, 9 Feb 2024 16:47:41 +0100
Subject: [PATCH 17/27] removed leftover unneeded fstring

---
 runner.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/runner.py b/runner.py
index bece5c35b..1a96529b6 100755
--- a/runner.py
+++ b/runner.py
@@
-592,9 +592,9 @@ def setup_services(self): services_ordered = self.order_services(services) for service_name, service in services_ordered.items(): if 'container_name' in service: - container_name = f"{service['container_name']}" + container_name = service['container_name'] else: - container_name = f"{service_name}" + container_name = service_name print(TerminalColors.HEADER, '\nSetting up container: ', container_name, TerminalColors.ENDC) From a67cdf3edb0410feef03315826d0bc950c7477ee Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 9 Feb 2024 17:14:11 +0100 Subject: [PATCH 18/27] fix typo in test_volume_loading_subdirectories_root failing test (sanity officially destroyed); update reference to green-coding-solutions in test_jobs --- tests/test_volume_loading.py | 4 +--- tests/tools/test_jobs.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_volume_loading.py b/tests/test_volume_loading.py index a9bfc40f5..8998fa5ad 100644 --- a/tests/test_volume_loading.py +++ b/tests/test_volume_loading.py @@ -210,8 +210,6 @@ def prepare_subdir_tmp_directory(parallel_id): return tmp_dir_path -#TODO: figure out why this isn't working nicely during parallelization -# its failing in the VM, despite the fact that the stdout does indeed have the expected strings in them @pytest.mark.serial def test_volume_loading_subdirectories_root(): parallel_id = utils.randomword(12) @@ -238,7 +236,7 @@ def test_volume_loading_subdirectories_root(): expect_mounted_testfile_2 = f"stdout from process: ['docker', 'exec', 'test-container-{parallel_id}', 'grep', 'testfile2-content', '/tmp/testfile2-correctly-mounted'] testfile2-content" assert expect_mounted_testfile_2 in run_stdout, Tests.assertion_info(expect_mounted_testfile_2, f"expected output not in {run_stdout}") - expect_mounted_testfile_3 = f"stdout from process: [s'docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" + expect_mounted_testfile_3 = f"stdout from process: ['docker', 'exec', 'test-container-root-{parallel_id}', 'grep', 'testfile3-content', '/tmp/testfile3-correctly-copied'] testfile3-content" assert expect_mounted_testfile_3 in run_stdout, Tests.assertion_info(expect_mounted_testfile_3, f"expected output not in {run_stdout}") def test_volume_loading_subdirectories_subdir(): diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index b6fda478e..9a49db9eb 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -67,7 +67,7 @@ def test_insert_job(): @pytest.mark.serial def test_simple_run_job(): name = utils.randomword(12) - url = 'https://github.com/green-coding-berlin/pytest-dummy-repo' + url = 'https://github.com/green-coding-solutions/pytest-dummy-repo' filename = 'usage_scenario.yml' Job.insert(name, url, 'Test Email', 'main', filename, 1) From 365bc6e8acebe430a123cb1e6330e54068f935cc Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 9 Feb 2024 17:26:50 +0100 Subject: [PATCH 19/27] don't run examples directory tests --- .github/workflows/tests-bare-metal-main.yml | 1 + .github/workflows/tests-eco-ci-energy-estimation.yaml | 1 + .github/workflows/tests-vm-main.yml | 1 + .github/workflows/tests-vm-pr.yml | 1 + 4 files changed, 4 insertions(+) diff --git a/.github/workflows/tests-bare-metal-main.yml b/.github/workflows/tests-bare-metal-main.yml index 0fbaa43a7..f7734bd5a 100644 --- a/.github/workflows/tests-bare-metal-main.yml +++ b/.github/workflows/tests-bare-metal-main.yml @@ -38,6 +38,7 @@ jobs: with: 
metrics-to-turn-off: 'Machine Sensors Debug MacOS' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: False - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@main diff --git a/.github/workflows/tests-eco-ci-energy-estimation.yaml b/.github/workflows/tests-eco-ci-energy-estimation.yaml index 39640efab..2adbad440 100644 --- a/.github/workflows/tests-eco-ci-energy-estimation.yaml +++ b/.github/workflows/tests-eco-ci-energy-estimation.yaml @@ -32,6 +32,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: False - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@testing diff --git a/.github/workflows/tests-vm-main.yml b/.github/workflows/tests-vm-main.yml index ddbbc0ff1..9506f94b0 100644 --- a/.github/workflows/tests-vm-main.yml +++ b/.github/workflows/tests-vm-main.yml @@ -36,6 +36,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: False - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@v2 diff --git a/.github/workflows/tests-vm-pr.yml b/.github/workflows/tests-vm-pr.yml index 45f5461e0..35d4a6efb 100644 --- a/.github/workflows/tests-vm-pr.yml +++ b/.github/workflows/tests-vm-pr.yml @@ -27,6 +27,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} + run-examples-directory-tests: False - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@v2 From 693fe74e009a3dff2e797135efd3513e2a4c009a Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 9 Feb 2024 18:12:30 +0100 Subject: [PATCH 20/27] debug statement --- tests/test_usage_scenario.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index d67f7ccee..7076ff41f 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -742,6 +742,7 @@ def test_read_detached_process_failure(): runner.run() finally: runner.cleanup() + print (out.getvalue()) assert '\'g4jiorejf\']\' had bad returncode: 126' in str(e.value), \ Tests.assertion_info('\'g4jiorejf\']\' had bad returncode: 126', str(e.value)) From 309a33092a37dab86f04b568c4ac9777e25e00db Mon Sep 17 00:00:00 2001 From: dan-mm Date: Tue, 13 Feb 2024 15:16:15 +0100 Subject: [PATCH 21/27] - found tests that were being run without no_build flag when they should have been; setup_runner now properly runs with no_build flag by default; cleaned up calls to setup_runner; build gcb_stress_gmt_run_tmp image before test runs; revert _tmp_image_name parallelization (unneeded); --- runner.py | 3 +- tests/conftest.py | 10 +++ tests/smoke_test.py | 2 +- .../compose_gmt_run_tmp.yml | 7 ++ tests/test_config_opts.py | 8 +-- tests/test_functions.py | 3 +- tests/test_runner.py | 4 +- tests/test_usage_scenario.py | 65 +++++++++---------- tests/tools/test_jobs.py | 5 -- 9 files changed, 52 insertions(+), 55 deletions(-) create mode 100644 tests/stress-application/compose_gmt_run_tmp.yml diff --git a/runner.py b/runner.py index 1a96529b6..cc12db580 100755 --- a/runner.py +++ 
b/runner.py @@ -82,7 +82,6 @@ def __init__(self, self._run_id = None self._commit_hash = None self._commit_timestamp = None - self._tmp_image_name = 'gmt_run_tmp' del self._arguments['self'] # self is not needed and also cannot be serialzed. We remove it @@ -444,7 +443,7 @@ def clean_image_name(self, name): name = re.sub(r'[^A-Za-z0-9_]', '', name) # only lowercase letters are allowed for tags name = name.lower() - name = f"{name}_{self._tmp_image_name}" + name = f"{name}_gmt_run_tmp" return name def build_docker_images(self): diff --git a/tests/conftest.py b/tests/conftest.py index ec76a9f57..b54a52ba7 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,6 +2,7 @@ import os import shutil from lib.db import DB +import subprocess ## VERY IMPORTANT to override the config file here ## otherwise it will automatically connect to non-test DB and delete all your real data @@ -33,6 +34,15 @@ def cleanup_temp_directories(): if os.path.exists("/tmp/gmt-test-data/"): shutil.rmtree("/tmp/gmt-test-data/") +def build_image_fixture(): + uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) + subprocess.run(['docker', 'compose', '-f', uri+'/compose_gmt_run_tmp.yml', 'build'], check=True) + GlobalConfig().override_config(config_name='test-config.yml') + +def pytest_sessionstart(session): + if not hasattr(session.config, 'workerinput'): + build_image_fixture() + def pytest_sessionfinish(session): if not hasattr(session.config, 'workerinput'): cleanup_tables() diff --git a/tests/smoke_test.py b/tests/smoke_test.py index 59ecb4933..46d7f1ccc 100644 --- a/tests/smoke_test.py +++ b/tests/smoke_test.py @@ -35,7 +35,7 @@ def setup_module(): subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) # Run the application - runner = Tests.setup_runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=False, skip_system_checks=False, create_tmp_directory=False, parallel_id=parallel_id) + runner = Tests.setup_runner(name=RUN_NAME, uri=uri, uri_type='folder', dev_no_metrics=False, skip_system_checks=False, create_tmp_directory=False, parallel_id=parallel_id) runner.run() #pylint: disable=global-statement diff --git a/tests/stress-application/compose_gmt_run_tmp.yml b/tests/stress-application/compose_gmt_run_tmp.yml new file mode 100644 index 000000000..6cbc53596 --- /dev/null +++ b/tests/stress-application/compose_gmt_run_tmp.yml @@ -0,0 +1,7 @@ +version: '2' +services: + stress: + build: . 
+    image: gcb_stress_gmt_run_tmp
+    container_name: gcb_stress
+    restart: always
diff --git a/tests/test_config_opts.py b/tests/test_config_opts.py
index 85e46185e..64764ce6e 100644
--- a/tests/test_config_opts.py
+++ b/tests/test_config_opts.py
@@ -1,5 +1,4 @@
 import os
-import subprocess
 import pytest
 
 CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -24,11 +23,6 @@ def reset_config_fixture():
     config['measurement']['idle-time-end'] = idle_time_end
     config['measurement']['flow-process-runtime'] = flow_process_runtime
 
-@pytest.fixture(autouse=True, scope="module", name="build_image")
-def build_image_fixture():
-    uri = os.path.abspath(os.path.join(
-        CURRENT_DIR, 'stress-application/'))
-    subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True)
 
 #pylint: disable=expression-not-assigned
 def run_runner():
@@ -37,7 +31,7 @@ def run_runner():
 
     # Run the application
     RUN_NAME = 'test_' + utils.randomword(12)
-    runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', verbose_provider_boot=True, dev_repeat_run=True, skip_system_checks=True)
+    runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', verbose_provider_boot=True, skip_system_checks=True)
     return runner.run()
 
 # Rethink how to do this test entirely
diff --git a/tests/test_functions.py b/tests/test_functions.py
index ef8effc68..ce5f39ebc 100644
--- a/tests/test_functions.py
+++ b/tests/test_functions.py
@@ -38,7 +38,6 @@ def replace_include_in_usage_scenario(usage_scenario_path, docker_compose_filena
 def parallelize_runner_folders(runner, parallel_id):
     runner._tmp_folder = f"/tmp/gmt_tests-{parallel_id}/green-metrics-tool/"
     runner._folder = f"{runner._tmp_folder}/repo"
-    runner._tmp_image_name = f"gmt_run_tmp_{parallel_id}"
 
 def edit_yml_with_id(yml_path, parallel_id):
     with open(yml_path, 'r', encoding='utf-8') as fp:
@@ -103,7 +102,7 @@ def parallelize_files(proj_dir, usage_scenario_file, docker_compose='compose.yml
 
 def setup_runner(name=None, usage_scenario="usage_scenario.yml", docker_compose=None,
                  uri='default', uri_type='folder', branch=None, debug_mode=False, allow_unsafe=False, no_file_cleanup=False,
-                 skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=False, skip_system_checks=True,
+                 skip_unsafe=False, verbose_provider_boot=False, dir_name=None, dev_no_build=True, skip_system_checks=True,
                  dev_no_sleeps=True, dev_no_metrics=True, parallel_id=None, create_tmp_directory=True, do_parallelize_files=True):
 
     if parallel_id is None:
diff --git a/tests/test_runner.py b/tests/test_runner.py
index 2d6bac1b1..9a46d5a0e 100644
--- a/tests/test_runner.py
+++ b/tests/test_runner.py
@@ -47,9 +47,9 @@ def test_reporters_still_running():
     }
     GlobalConfig().config['measurement']['metric-providers']['linux'].update(real_provider)
 
-    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_sleeps=True, dev_no_build=True, dev_no_metrics=False)
+    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_metrics=False)
 
-    runner2 = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_sleeps=True, dev_no_build=True, dev_no_metrics=False)
+    runner2 = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, skip_system_checks=False, dev_no_metrics=False)
 
     runner.check_system('start') # should not fail
 
diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py
index 7076ff41f..427d9ba3b 100644
---
a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -27,13 +27,6 @@ # Always do asserts after try:finally: blocks # otherwise failing Tests will not run the runner.cleanup() properly -# This should be done once per module -@pytest.fixture(autouse=True, scope="module", name="build_image") -def build_image_fixture(): - uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - subprocess.run(['docker', 'compose', '-f', uri+'/compose.yml', 'build'], check=True) - GlobalConfig().override_config(config_name='test-config.yml') - # This function runs the runner up to and *including* the specified step #pylint: disable=redefined-argument-from-local ### The Tests for usage_scenario configurations @@ -61,7 +54,7 @@ def get_env_vars(runner, parallel_id): # Test allowed characters def test_env_variable_allowed_characters(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_allowed.yml', skip_unsafe=False, parallel_id=parallel_id) env_var_output = get_env_vars(runner, parallel_id) assert 'TESTALLOWED=alpha-num123_' in env_var_output, Tests.assertion_info('TESTALLOWED=alpha-num123_', env_var_output) @@ -72,7 +65,7 @@ def test_env_variable_allowed_characters(): # Test too long values def test_env_variable_too_long(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: get_env_vars(runner, parallel_id) @@ -81,7 +74,7 @@ def test_env_variable_too_long(): # Test skip_unsafe=true def test_env_variable_skip_unsafe_true(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', skip_unsafe=True, parallel_id=parallel_id) env_var_output = get_env_vars(runner, parallel_id) # Only allowed values should be in env vars, forbidden ones should be skipped @@ -91,7 +84,7 @@ def test_env_variable_skip_unsafe_true(): # Test allow_unsafe=true def test_env_variable_allow_unsafe_true(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='env_vars_stress_forbidden.yml', allow_unsafe=True, parallel_id=parallel_id) env_var_output = get_env_vars(runner, parallel_id) # Both allowed and forbidden values should be in env vars @@ -119,7 +112,7 @@ def get_port_bindings(runner, parallel_id): def test_port_bindings_allow_unsafe_true(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', allow_unsafe=True, parallel_id=parallel_id) port, _ = get_port_bindings(runner, parallel_id) assert port.startswith('0.0.0.0:9017'), 
Tests.assertion_info('0.0.0.0:9017', port) @@ -127,7 +120,7 @@ def test_port_bindings_skip_unsafe_true(): out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', skip_unsafe=True, parallel_id=parallel_id) # need to catch exception here as otherwise the subprocess returning an error will # fail the test @@ -142,7 +135,7 @@ def test_port_bindings_skip_unsafe_true(): def test_port_bindings_no_skip_or_allow(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='port_bindings_stress.yml', parallel_id=parallel_id) with pytest.raises(Exception) as e: _, docker_port_err = get_port_bindings(runner, parallel_id) expected_container_error = f"Error: No public port \'9018/tcp\' published for test-container-{parallel_id}\n" @@ -159,7 +152,7 @@ def test_setup_commands_one_command(): out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='setup_commands_stress.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -175,7 +168,7 @@ def test_setup_commands_multiple_commands(): out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='setup_commands_multiple_stress.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -236,7 +229,7 @@ def test_depends_on_order(): out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='depends_on.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='depends_on.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -254,7 +247,7 @@ def test_depends_on_huge(): out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='depends_on_huge.yml', parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err): try: @@ -328,7 +321,7 @@ def test_depends_on_huge(): def test_depends_on_error_not_running(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='depends_on_error_not_running.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') @@ -340,7 +333,7 @@ def test_depends_on_error_not_running(): def test_depends_on_error_cyclic_dependency(): parallel_id=utils.randomword(12) - 
runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='depends_on_error_cycle.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') @@ -352,7 +345,7 @@ def test_depends_on_error_cyclic_dependency(): def test_depends_on_error_unsupported_condition(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='depends_on_error_unsupported_condition.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: Tests.run_until(runner, 'setup_services') @@ -365,7 +358,7 @@ def test_depends_on_error_unsupported_condition(): Tests.assertion_info(message, str(e.value)) def test_depends_on_long_form(): - runner = Tests.setup_runner(usage_scenario='depends_on_long_form.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='depends_on_long_form.yml') out = io.StringIO() err = io.StringIO() @@ -380,7 +373,7 @@ def test_depends_on_long_form(): def test_depends_on_healthcheck(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='healthcheck.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='healthcheck.yml', parallel_id=parallel_id) out = io.StringIO() err = io.StringIO() @@ -397,7 +390,7 @@ def test_depends_on_healthcheck(): def test_depends_on_healthcheck_error_missing(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='healthcheck_error_missing.yml', parallel_id=parallel_id) try: with pytest.raises(RuntimeError) as e: @@ -414,7 +407,7 @@ def test_depends_on_healthcheck_error_missing(): def test_volume_bindings_allow_unsafe_true(): parallel_id = utils.randomword(12) create_test_file("/tmp/gmt-test-data") - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', allow_unsafe=True, parallel_id=parallel_id) ls = get_contents_of_bound_volume(runner, parallel_id) assert 'test-file' in ls, Tests.assertion_info('test-file', ls) @@ -424,7 +417,7 @@ def test_volumes_bindings_skip_unsafe_true(): out = io.StringIO() err = io.StringIO() parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', skip_unsafe=True, parallel_id=parallel_id) with redirect_stdout(out), redirect_stderr(err), pytest.raises(Exception): ls = get_contents_of_bound_volume(runner, parallel_id) @@ -437,7 +430,7 @@ def test_volumes_bindings_no_skip_or_allow(): parallel_id = utils.randomword(12) create_test_file("/tmp/gmt-test-data") parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', 
dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='volume_bindings_stress.yml', parallel_id=parallel_id) with pytest.raises(RuntimeError) as e: ls = get_contents_of_bound_volume(runner, parallel_id) assert ls == '', Tests.assertion_info('empty list', ls) @@ -446,7 +439,7 @@ def test_volumes_bindings_no_skip_or_allow(): Tests.assertion_info(f"Exception: {expected_exception}", str(e.value)) def test_network_created(): - runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='network_stress.yml') try: Tests.run_until(runner, 'setup_networks') ps = subprocess.run( @@ -463,7 +456,7 @@ def test_network_created(): def test_container_is_in_network(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='network_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='network_stress.yml', parallel_id=parallel_id) try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( @@ -484,7 +477,7 @@ def test_container_is_in_network(): # is started here to have the container running like bash or sh def test_cmd_ran(): parallel_id = utils.randomword(12) - runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True, parallel_id=parallel_id) + runner = Tests.setup_runner(usage_scenario='cmd_stress.yml', parallel_id=parallel_id) try: Tests.run_until(runner, 'setup_services') ps = subprocess.run( @@ -520,7 +513,7 @@ def test_uri_local_dir(): assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) def test_uri_local_dir_missing(): - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='/tmp/missing', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', uri='/tmp/missing') try: with pytest.raises(FileNotFoundError) as e: runner.run() @@ -551,7 +544,7 @@ def test_uri_github_repo(): ## --branch BRANCH # Optionally specify the git branch when targeting a git repository def test_uri_local_branch(): - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', branch='test-branch', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', branch='test-branch') out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err), pytest.raises(RuntimeError) as e: @@ -680,7 +673,7 @@ def test_no_file_cleanup(): #pylint: disable=unused-variable def test_skip_and_allow_unsafe_both_true(): with pytest.raises(RuntimeError) as e: - runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_unsafe=True, allow_unsafe=True) expected_exception = 'Cannot specify both --skip-unsafe and --allow-unsafe' assert str(e.value) == expected_exception, Tests.assertion_info('', str(e.value)) @@ -707,7 +700,7 @@ def test_debug(monkeypatch): # can check for this note in the DB and the notes are about 2s apart def test_read_detached_process_no_exit(): - runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml', dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_no_exit.yml') out = io.StringIO() err = io.StringIO() with 
redirect_stdout(out), redirect_stderr(err): @@ -721,7 +714,7 @@ def test_read_detached_process_no_exit(): Tests.assertion_info('NOT successful run completed', out.getvalue()) def test_read_detached_process_after_exit(): - runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml', dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_exit.yml') out = io.StringIO() err = io.StringIO() with redirect_stdout(out), redirect_stderr(err): @@ -733,7 +726,7 @@ def test_read_detached_process_after_exit(): Tests.assertion_info('successful run completed', out.getvalue()) def test_read_detached_process_failure(): - runner = Tests.setup_runner(usage_scenario='stress_detached_failure.yml', dev_no_metrics=True, dev_no_sleeps=True, dev_no_build=True) + runner = Tests.setup_runner(usage_scenario='stress_detached_failure.yml') out = io.StringIO() err = io.StringIO() diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index 9a49db9eb..bd840bdf1 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -21,11 +21,6 @@ def register_machine_fixture(): machine = Machine(machine_id=1, description='test-machine') machine.register() -# This should be done once per module -@pytest.fixture(autouse=True, scope="module", name="build_image") -def build_image_fixture(): - subprocess.run(['docker', 'compose', '-f', f"{CURRENT_DIR}/../stress-application/compose.yml", 'build'], check=True) - def get_job(job_id): query = """ SELECT From eb537e7c0f0436d5b74e6534cecf2439cb2d9af7 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Tue, 13 Feb 2024 15:34:16 +0100 Subject: [PATCH 22/27] corrected workflow input check syntax --- .github/actions/gmt-pytest/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index f17cebb06..d349e71ff 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -86,7 +86,7 @@ runs: shell: bash - name: Run Tests - if: ${{ inputs.run-examples-directory-tests }} == false + if: inputs.run-examples-directory-tests == false shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | @@ -95,7 +95,7 @@ runs: python3 -m pytest -m "serial" -rA | tee -a /tmp/test-results.txt - name: Run Tests (examples directory) - if: ${{ inputs.run-examples-directory-tests }} == true + if: inputs.run-examples-directory-tests == true shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | From d860a3353ce3aae3759cff808f92a1cd358a6e83 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Tue, 13 Feb 2024 15:44:57 +0100 Subject: [PATCH 23/27] capitalization --- .github/workflows/tests-vm-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-vm-pr.yml b/.github/workflows/tests-vm-pr.yml index 35d4a6efb..1e51df068 100644 --- a/.github/workflows/tests-vm-pr.yml +++ b/.github/workflows/tests-vm-pr.yml @@ -27,7 +27,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} - run-examples-directory-tests: False + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@v2 From 5c8f3f0e4591d4f2de09b3d61b1fe9837fde1fa1 Mon Sep 17 00:00:00 2001 From: dan-mm Date: Tue, 13 Feb 2024 16:10:03 +0100 Subject: [PATCH 24/27] github workflow inputs arent real bools --- 
.github/actions/gmt-pytest/action.yml | 4 ++-- .github/workflows/tests-bare-metal-main.yml | 2 +- .github/workflows/tests-eco-ci-energy-estimation.yaml | 2 +- .github/workflows/tests-vm-main.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/actions/gmt-pytest/action.yml b/.github/actions/gmt-pytest/action.yml index d349e71ff..8450c2625 100644 --- a/.github/actions/gmt-pytest/action.yml +++ b/.github/actions/gmt-pytest/action.yml @@ -86,7 +86,7 @@ runs: shell: bash - name: Run Tests - if: inputs.run-examples-directory-tests == false + if: inputs.run-examples-directory-tests == 'false' shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | @@ -95,7 +95,7 @@ runs: python3 -m pytest -m "serial" -rA | tee -a /tmp/test-results.txt - name: Run Tests (examples directory) - if: inputs.run-examples-directory-tests == true + if: inputs.run-examples-directory-tests == 'true' shell: bash working-directory: ${{ inputs.gmt-directory }}/tests run: | diff --git a/.github/workflows/tests-bare-metal-main.yml b/.github/workflows/tests-bare-metal-main.yml index f7734bd5a..c76ab7806 100644 --- a/.github/workflows/tests-bare-metal-main.yml +++ b/.github/workflows/tests-bare-metal-main.yml @@ -38,7 +38,7 @@ jobs: with: metrics-to-turn-off: 'Machine Sensors Debug MacOS' github-token: ${{ secrets.GITHUB_TOKEN }} - run-examples-directory-tests: False + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@main diff --git a/.github/workflows/tests-eco-ci-energy-estimation.yaml b/.github/workflows/tests-eco-ci-energy-estimation.yaml index 2adbad440..a8d76b232 100644 --- a/.github/workflows/tests-eco-ci-energy-estimation.yaml +++ b/.github/workflows/tests-eco-ci-energy-estimation.yaml @@ -32,7 +32,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} - run-examples-directory-tests: False + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@testing diff --git a/.github/workflows/tests-vm-main.yml b/.github/workflows/tests-vm-main.yml index 9506f94b0..f66e617de 100644 --- a/.github/workflows/tests-vm-main.yml +++ b/.github/workflows/tests-vm-main.yml @@ -36,7 +36,7 @@ jobs: with: metrics-to-turn-off: '--categories RAPL Machine Sensors Debug CGroupV2 MacOS GPU --providers PsuEnergyAcSdiaMachineProvider' github-token: ${{ secrets.GITHUB_TOKEN }} - run-examples-directory-tests: False + run-examples-directory-tests: false - name: Eco CI Energy Estimation - Get Measurement uses: green-coding-berlin/eco-ci-energy-estimation@v2 From 0d8f26a56ed0e4445b354343a892d070b7d4916d Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 16 Feb 2024 10:56:55 +0100 Subject: [PATCH 25/27] fix test_jobs (improper cleanup after insert job test); cleanup test directories in /tmp/; --- runner.py | 2 +- tests/conftest.py | 7 +++++++ tests/tools/test_jobs.py | 28 ++++++++++++++++++++++------ 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/runner.py b/runner.py index cc12db580..8c4d51044 100755 --- a/runner.py +++ b/runner.py @@ -72,7 +72,7 @@ def __init__(self, self._uri_type = uri_type self._original_filename = filename self._branch = branch - self._tmp_folder = "/tmp/green-metrics-tool" + self._tmp_folder = '/tmp/green-metrics-tool' self._usage_scenario = {} self._architecture = 
utils.get_architecture() self._sci = {'R_d': None, 'R': 0} diff --git a/tests/conftest.py b/tests/conftest.py index b54a52ba7..3cb5717b2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -34,6 +34,13 @@ def cleanup_temp_directories(): if os.path.exists("/tmp/gmt-test-data/"): shutil.rmtree("/tmp/gmt-test-data/") + # remove all files/folders under gmt_tests-* in /tmp + for item in os.listdir("/tmp"): + if item.startswith('gmt_tests-'): + item_path = os.path.join("/tmp", item) + if os.path.isdir(item_path): + shutil.rmtree(item_path) + def build_image_fixture(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) subprocess.run(['docker', 'compose', '-f', uri+'/compose_gmt_run_tmp.yml', 'build'], check=True) diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index bd840bdf1..2b1d92f8b 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -21,6 +21,12 @@ def register_machine_fixture(): machine = Machine(machine_id=1, description='test-machine') machine.register() + +# This should be done once per module +# @pytest.fixture(autouse=True, scope="module", name="build_image") +# def build_image_fixture(): +# subprocess.run(['docker', 'compose', '-f', f"{CURRENT_DIR}/../stress-application/compose.yml", 'build'], check=True) + def get_job(job_id): query = """ SELECT @@ -35,11 +41,8 @@ def get_job(job_id): return data -#@pytest.mark.xdist_group(name="jobs") @pytest.mark.serial -def test_no_job_to_process(): - # make sure jobs table is empty - DB().query('TRUNCATE TABLE jobs RESTART IDENTITY CASCADE') +def test_no_run_job(): ps = subprocess.run( ['python3', '../tools/jobs.py', 'run', '--config-override', 'test-config.yml'], check=True, @@ -47,18 +50,31 @@ def test_no_job_to_process(): stdout=subprocess.PIPE, encoding='UTF-8' ) + print(ps.stderr) + assert 'No job to process. Exiting' in ps.stdout,\ + Tests.assertion_info('No job to process. Exiting', ps.stdout) + +@pytest.mark.serial +def test_no_email_job(): + ps = subprocess.run( + ['python3', '../tools/jobs.py', 'email', '--config-override', 'test-config.yml'], + check=True, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + encoding='UTF-8' + ) assert 'No job to process. Exiting' in ps.stdout,\ Tests.assertion_info('No job to process. Exiting', ps.stdout) -#@pytest.mark.xdist_group(name="jobs") @pytest.mark.serial def test_insert_job(): job_id = Job.insert('Test Name', 'Test URL', 'Test Email', 'Test Branch', 'Test filename', 1) assert job_id is not None job = Job.get_job('run') assert job._state == 'WAITING' + ## cleanup + DB().query('TRUNCATE TABLE jobs RESTART IDENTITY CASCADE') -#@pytest.mark.xdist_group(name="jobs") @pytest.mark.serial def test_simple_run_job(): name = utils.randomword(12) From 5cbc0b06d46a4074811a601146f4c2771a6e8a7f Mon Sep 17 00:00:00 2001 From: dan-mm Date: Fri, 16 Feb 2024 11:07:43 +0100 Subject: [PATCH 26/27] updated tests Readme --- tests/README.MD | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/tests/README.MD b/tests/README.MD index d5075bf3f..a5657f2e5 100644 --- a/tests/README.MD +++ b/tests/README.MD @@ -39,9 +39,25 @@ There are a few scripts to make this easy. `./run-tests.sh` will do everything - start the containers, run pytest, and then stop the containers. The recommended workflow is to start the containers with the `./start-test-containers.sh` script, then in another shell -window run the pytest suite using `pytest -n auto --dist loadgroup`, and then stop the containers when your test run has finished. 
+window run the pytest suite using:
+
+`pytest -n auto -m "not serial" --dist loadgroup && pytest -m "serial"`, and then stop the containers when your test run has finished.
 
 Running a subset of tests using pytest is better explained within the documentation here: https://docs.pytest.org/en/7.2.x/how-to/usage.html
 
 You can also do everything in one command using the `./run-tests.sh` script.
+
+
+## Parallelization
+We now support running our test suite in parallel using xdist. When writing tests it is important to note that not all tests can be parallelized, and the ones that cannot need to be marked accordingly. For parallelization, we use functions in test_functions.py to set up the environment with unique container names, as well as setting up the runner with setup_runner so that its tmp folders are also unique. If you bypass setup_runner, you will still need to use the `parallelize_runner_folders` function to make sure its internal directories are correct.
+
+Any test that cannot be parallelized should be marked with:
+`@pytest.mark.serial`
+
+This includes any test that runs the runner through a subprocess, or otherwise creates a Runner class without using either test_functions.setup_runner or test_functions.parallelize_runner_folders.
+
+Tests that do not set skip_system_checks can be parallelized, but only if they are marked with:
+`@pytest.mark.xdist_group(name="systems_checks")`
+
+This will make all tests that use this group name run sequentially on the same worker (but in parallel to the rest of the suite). This is needed because we have a system check which makes sure the metric providers are not already running during setup.
\ No newline at end of file
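For illustration, a minimal sketch of the test patterns this README describes might look as follows; `setup_runner` and `randomword` are the helpers referenced in the patch series, while the test names and the `basic_stress.yml` scenario are placeholders:

```python
import pytest

from lib import utils
from tests import test_functions as Tests


def test_runs_in_parallel():
    # a unique id keeps container names and /tmp folders from colliding across xdist workers
    parallel_id = utils.randomword(12)
    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', parallel_id=parallel_id)
    try:
        runner.run()
    finally:
        runner.cleanup()


@pytest.mark.serial  # runs the runner through a subprocess, so it cannot be parallelized
def test_must_run_alone():
    ...


@pytest.mark.xdist_group(name="systems_checks")  # system-check tests share one worker
def test_with_system_checks():
    runner = Tests.setup_runner(usage_scenario='basic_stress.yml', skip_system_checks=False)
    ...
```

From d5087ed09969f4d791c89733f50a61fc52d7f8fc Mon Sep 17 00:00:00 2001
From: dan-mm
Date: Fri, 16 Feb 2024 17:33:17 +0100
Subject: [PATCH 27/27] removed unneeded dummy cpu util provider; renamed RUN_NAME -> name for readability

---
 .../cpu/utilization/procfs/system/Makefile    |   4 -
 .../cpu/utilization/procfs/system/provider.py |  17 ---
 .../cpu/utilization/procfs/system/source.c    | 118 ------------------
 tests/test_config_opts.py                     |   4 +-
 tests/test_usage_scenario.py                  |  53 ++++----
 tests/tools/test_jobs.py                      |   6 -
 6 files changed, 29 insertions(+), 173 deletions(-)
 delete mode 100644 metric_providers/dummy/cpu/utilization/procfs/system/Makefile
 delete mode 100644 metric_providers/dummy/cpu/utilization/procfs/system/provider.py
 delete mode 100644 metric_providers/dummy/cpu/utilization/procfs/system/source.c

diff --git a/metric_providers/dummy/cpu/utilization/procfs/system/Makefile b/metric_providers/dummy/cpu/utilization/procfs/system/Makefile
deleted file mode 100644
index 3fbdd7c34..000000000
--- a/metric_providers/dummy/cpu/utilization/procfs/system/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-CFLAGS = -o3 -Wall
-
-metric-provider-binary: source.c
-	gcc $< $(CFLAGS) -o $@
\ No newline at end of file
diff --git a/metric_providers/dummy/cpu/utilization/procfs/system/provider.py b/metric_providers/dummy/cpu/utilization/procfs/system/provider.py
deleted file mode 100644
index e103f2ad6..000000000
--- a/metric_providers/dummy/cpu/utilization/procfs/system/provider.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-
-from metric_providers.base import BaseMetricProvider
-
-class DummyCpuUtilizationProcfsSystemProvider(BaseMetricProvider):
-    def __init__(self, resolution, skip_check=False):
-        super().__init__(
-            metric_name='dummy_cpu_utilization_procfs_system',
-            metrics={'time': int, 'value': int},
-            resolution=resolution,
-            unit='Ratio',
-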
current_dir=os.path.dirname(os.path.abspath(__file__)), - skip_check = skip_check, - ) - - def check_system(self): - pass diff --git a/metric_providers/dummy/cpu/utilization/procfs/system/source.c b/metric_providers/dummy/cpu/utilization/procfs/system/source.c deleted file mode 100644 index b46f51335..000000000 --- a/metric_providers/dummy/cpu/utilization/procfs/system/source.c +++ /dev/null @@ -1,118 +0,0 @@ -#include -#include -#include -#include -#include -#include - -typedef struct procfs_time_t { // struct is a specification and this static makes no sense here - unsigned long user_time; - unsigned long nice_time; - unsigned long system_time; - unsigned long wait_time; - unsigned long iowait_time; - unsigned long irq_time; - unsigned long softirq_time; - unsigned long steal_time; - // guest times are ignored as they are already accounted in user_time, system_time - unsigned long compute_time; // custom attr by us not in standard /proc/stat format - unsigned long idle_time; // custom attr by us not in standard /proc/stat format -} procfs_time_t; - - -// All variables are made static, because we believe that this will -// keep them local in scope to the file and not make them persist in state -// between Threads. -// TODO: If this code ever gets multi-threaded please review this assumption to -// not pollute another threads state -static unsigned int msleep_time=1000; - -static void read_cpu_proc(procfs_time_t* procfs_time_struct) { - - FILE* fd = NULL; - - fd = fopen("/proc/stat", "r"); - if ( fd == NULL) { - fprintf(stderr, "Error - file %s failed to open: errno: %d\n", "/proc/stat/", errno); - exit(1); - } - - fscanf(fd, "cpu %ld %ld %ld %ld %ld %ld %ld %ld", &procfs_time_struct->user_time, &procfs_time_struct->nice_time, &procfs_time_struct->system_time, &procfs_time_struct->wait_time, &procfs_time_struct->iowait_time, &procfs_time_struct->irq_time, &procfs_time_struct->softirq_time, &procfs_time_struct->steal_time); - - // debug - // printf("Read: cpu %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", procfs_time_struct->user_time, procfs_time_struct->nice_time, procfs_time_struct->system_time, procfs_time_struct->idle_time, procfs_time_struct->iowait_time, procfs_time_struct->irq_time, procfs_time_struct->softirq_time, procfs_time_struct->steal_time); - - fclose(fd); - - // after this multiplication we are on microseconds - // integer division is deliberately, cause we don't loose precision as *1000000 is done before - - procfs_time_struct->idle_time = procfs_time_struct->wait_time + procfs_time_struct->iowait_time; - procfs_time_struct->compute_time = procfs_time_struct->user_time + procfs_time_struct->nice_time + procfs_time_struct->system_time + procfs_time_struct->irq_time + procfs_time_struct->softirq_time + procfs_time_struct->steal_time; -} - - -static int output_stats() { - - long int idle_reading, compute_time_reading; - procfs_time_t main_cpu_reading_before; - procfs_time_t main_cpu_reading_after; - struct timeval now; - - gettimeofday(&now, NULL); // will set now - read_cpu_proc(&main_cpu_reading_before); // will set main_cpu_reading_before - - usleep(msleep_time*1000); - - read_cpu_proc(&main_cpu_reading_after); // will set main_cpu_reading_before - - idle_reading = main_cpu_reading_after.idle_time - main_cpu_reading_before.idle_time; - compute_time_reading = main_cpu_reading_after.compute_time - main_cpu_reading_before.compute_time; - - // debug - // printf("Main CPU Idle Reading: %ld\nMain CPU Compute Time Reading: %ld\n", idle_reading, compute_time_reading); - // 
printf("%ld%06ld %f\n", now.tv_sec, now.tv_usec, (double)compute_time_reading / (double)(compute_time_reading+idle_reading)); - - // main output to Stdout - printf("%ld%06ld %ld\n", now.tv_sec, now.tv_usec, (compute_time_reading*10000) / (compute_time_reading+idle_reading) ); // Deliberate integer conversion. Precision with 0.01% is good enough - - return 1; -} - -int main(int argc, char **argv) { - - int c; - - setvbuf(stdout, NULL, _IONBF, 0); - - while ((c = getopt (argc, argv, "i:h")) != -1) { - switch (c) { - case 'h': - printf("Usage: %s [-i msleep_time] [-h]\n\n",argv[0]); - printf("\t-h : displays this help\n"); - printf("\t-i : specifies the milliseconds sleep time that will be slept between measurements\n\n"); - - struct timespec res; - double resolution; - - printf("\tEnvironment variables:\n"); - clock_getres(CLOCK_REALTIME, &res); - resolution = res.tv_sec + (((double)res.tv_nsec)/1.0e9); - printf("\tSystemHZ\t%ld\n", (unsigned long)(1/resolution + 0.5)); - printf("\tCLOCKS_PER_SEC\t%ld\n", CLOCKS_PER_SEC); - exit(0); - case 'i': - msleep_time = atoi(optarg); - break; - default: - fprintf(stderr,"Unknown option %c\n",c); - exit(-1); - } - } - - while(1) { - output_stats(); - } - - return 0; -} diff --git a/tests/test_config_opts.py b/tests/test_config_opts.py index 64764ce6e..4cfd9993e 100644 --- a/tests/test_config_opts.py +++ b/tests/test_config_opts.py @@ -30,8 +30,8 @@ def run_runner(): CURRENT_DIR, 'stress-application/')) # Run the application - RUN_NAME = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', verbose_provider_boot=True, skip_system_checks=True) + name = 'test_' + utils.randomword(12) + runner = Runner(name=name, uri=uri, uri_type='folder', verbose_provider_boot=True, skip_system_checks=True) return runner.run() # Rethink how to do this test entirely diff --git a/tests/test_usage_scenario.py b/tests/test_usage_scenario.py index 427d9ba3b..b2759c82c 100644 --- a/tests/test_usage_scenario.py +++ b/tests/test_usage_scenario.py @@ -498,9 +498,9 @@ def test_cmd_ran(): # / or a remote git repository starting with http(s):// def test_uri_local_dir(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', + ['python3', '../runner.py', '--name', name, '--uri', uri ,'--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, stderr=subprocess.PIPE, @@ -508,7 +508,7 @@ def test_uri_local_dir(): encoding='UTF-8' ) - uri_in_db = utils.get_run_data(RUN_NAME)['uri'] + uri_in_db = utils.get_run_data(name)['uri'] assert uri_in_db == uri, Tests.assertion_info(f"uri: {uri}", uri_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) @@ -527,9 +527,9 @@ def test_uri_local_dir_missing(): @pytest.mark.serial def test_uri_github_repo(): uri = 'https://github.com/green-coding-berlin/pytest-dummy-repo' - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', + ['python3', '../runner.py', '--name', name, '--uri', uri ,'--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, stderr=subprocess.PIPE, @@ 
-537,7 +537,7 @@ def test_uri_github_repo(): encoding='UTF-8' ) - uri_in_db = utils.get_run_data(RUN_NAME)['uri'] + uri_in_db = utils.get_run_data(name)['uri'] assert uri_in_db == uri, Tests.assertion_info(f"uri: {uri}", uri_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) @@ -559,9 +559,9 @@ def test_uri_local_branch(): @pytest.mark.serial def test_uri_github_repo_branch(): uri = 'https://github.com/green-coding-berlin/pytest-dummy-repo' - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--branch', 'test-branch' , '--filename', 'basic_stress.yml', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -570,7 +570,7 @@ def test_uri_github_repo_branch(): encoding='UTF-8' ) - branch_in_db = utils.get_run_data(RUN_NAME)['branch'] + branch_in_db = utils.get_run_data(name)['branch'] assert branch_in_db == 'test-branch', Tests.assertion_info('branch: test-branch', branch_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) @@ -599,17 +599,17 @@ def test_uri_github_repo_branch_missing(): @pytest.mark.serial def test_name_is_in_db(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri ,'--config-override', 'test-config.yml', + ['python3', '../runner.py', '--name', name, '--uri', uri ,'--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-metrics', '--dev-no-sleeps', '--dev-no-build'], check=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding='UTF-8' ) - name_in_db = utils.get_run_data(RUN_NAME)['name'] - assert name_in_db == RUN_NAME, Tests.assertion_info(f"name: {RUN_NAME}", name_in_db) + name_in_db = utils.get_run_data(name)['name'] + assert name_in_db == name, Tests.assertion_info(f"name: {name}", name_in_db) # --filename FILENAME # An optional alternative filename if you do not want to use "usage_scenario.yml" @@ -621,10 +621,10 @@ def test_different_filename(): compose_path = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/compose.yml')) Tests.make_proj_dir(dir_name=dir_name, usage_scenario_path=usage_scenario_path, docker_compose_path=compose_path) uri = os.path.join(CURRENT_DIR, 'tmp/', dir_name) - RUN_NAME = 'test_' + utils.randomword(12) - + name = 'test_' + dir_name + print(name) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--filename', 'basic_stress.yml', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -633,9 +633,10 @@ def test_different_filename(): encoding='UTF-8' ) + print(ps.stdout) with open(usage_scenario_path, 'r', encoding='utf-8') as f: usage_scenario_contents = yaml.safe_load(f) - usage_scenario_in_db = utils.get_run_data(RUN_NAME)['usage_scenario'] + usage_scenario_in_db = utils.get_run_data(name)['usage_scenario'] assert usage_scenario_in_db == usage_scenario_contents,\ Tests.assertion_info(usage_scenario_contents, usage_scenario_in_db) assert ps.stderr == '', Tests.assertion_info('no errors', ps.stderr) @@ -643,9 +644,9 @@ def test_different_filename(): # 
if that filename is missing... def test_different_filename_missing(): uri = os.path.abspath(os.path.join(CURRENT_DIR, '..', 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) - runner = Runner(name=RUN_NAME, uri=uri, uri_type='folder', filename='basic_stress.yml', skip_system_checks=True, dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=True) + runner = Runner(name=name, uri=uri, uri_type='folder', filename='basic_stress.yml', skip_system_checks=True, dev_no_build=True, dev_no_sleeps=True, dev_no_metrics=True) with pytest.raises(FileNotFoundError) as e: runner.run() @@ -658,9 +659,9 @@ def test_different_filename_missing(): @pytest.mark.serial def test_no_file_cleanup(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--no-file-cleanup', '--config-override', 'test-config.yml', '--skip-system-checks'], check=True, stderr=subprocess.PIPE, @@ -681,9 +682,9 @@ def test_skip_and_allow_unsafe_both_true(): def test_debug(monkeypatch): monkeypatch.setattr('sys.stdin', io.StringIO('Enter')) uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--debug', '--config-override', 'test-config.yml', '--skip-system-checks', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -743,9 +744,9 @@ def test_read_detached_process_failure(): ## rethink this one def wip_test_verbose_provider_boot(): uri = os.path.abspath(os.path.join(CURRENT_DIR, 'stress-application/')) - RUN_NAME = 'test_' + utils.randomword(12) + name = 'test_' + utils.randomword(12) ps = subprocess.run( - ['python3', '../runner.py', '--name', RUN_NAME, '--uri', uri , + ['python3', '../runner.py', '--name', name, '--uri', uri , '--verbose-provider-boot', '--config-override', 'test-config.yml', '--dev-no-sleeps', '--dev-no-build', '--dev-no-metrics'], check=True, @@ -753,7 +754,7 @@ def wip_test_verbose_provider_boot(): stdout=subprocess.PIPE, encoding='UTF-8' ) - run_id = utils.get_run_data(RUN_NAME)['id'] + run_id = utils.get_run_data(name)['id'] query = """ SELECT time, note diff --git a/tests/tools/test_jobs.py b/tests/tools/test_jobs.py index 2b1d92f8b..a2b72eac3 100644 --- a/tests/tools/test_jobs.py +++ b/tests/tools/test_jobs.py @@ -21,12 +21,6 @@ def register_machine_fixture(): machine = Machine(machine_id=1, description='test-machine') machine.register() - -# This should be done once per module -# @pytest.fixture(autouse=True, scope="module", name="build_image") -# def build_image_fixture(): -# subprocess.run(['docker', 'compose', '-f', f"{CURRENT_DIR}/../stress-application/compose.yml", 'build'], check=True) - def get_job(job_id): query = """ SELECT