@@ -1,5 +1,9 @@
 import tempfile
 import json
+import time
+import signal
+import subprocess
+import os
 from include import *
 from mb import Benchmark, RunConfig
 
@@ -907,3 +911,88 @@ def test_uri_invalid_database(env):
     # benchmark.run() should return False for invalid database number
     memtier_ok = benchmark.run()
     env.assertFalse(memtier_ok)
+
+
+def test_interrupt_signal_handling(env):
+    """Test that Ctrl+C (SIGINT) properly stops the benchmark and outputs correct statistics"""
+    # Use a large number of requests so the test doesn't finish before we interrupt it
+    benchmark_specs = {"name": env.testName, "args": ['--requests=1000000', '--hide-histogram']}
+    addTLSArgs(benchmark_specs, env)
+    config = get_default_memtier_config(threads=4, clients=50, requests=1000000)
+    master_nodes_list = env.getMasterNodesList()
+
+    add_required_env_arguments(benchmark_specs, config, env, master_nodes_list)
+
+    # Create a temporary directory
+    test_dir = tempfile.mkdtemp()
+    config = RunConfig(test_dir, env.testName, config, {})
+    ensure_clean_benchmark_folder(config.results_dir)
+
+    benchmark = Benchmark.from_json(config, benchmark_specs)
+
+    # Start the benchmark process manually so we can send SIGINT
+    import logging
+    logging.debug(' Command: %s', ' '.join(benchmark.args))
+
+    stderr_file = open(os.path.join(config.results_dir, 'mb.stderr'), 'wb')
+    process = subprocess.Popen(
+        stdin=None, stdout=subprocess.PIPE, stderr=stderr_file,
+        executable=benchmark.binary, args=benchmark.args)
+
+    # Wait 3 seconds, then send SIGINT
+    time.sleep(3)
+    process.send_signal(signal.SIGINT)
+
+    # Wait for the process to finish
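+    # communicate() returns None for stderr here because stderr was redirected to a
+    # file above, so only stdout is captured and the second element is discarded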
+    _stdout, _ = process.communicate()
+    stderr_file.close()
+
+    # Write stdout to file
+    benchmark.write_file('mb.stdout', _stdout)
+
+    # Read stderr to check for the interrupt message
+    with open(os.path.join(config.results_dir, 'mb.stderr'), 'r') as stderr:
+        stderr_content = stderr.read()
+        # Check that the interrupt message is present and shows elapsed time
+        env.assertTrue("Interrupted by user (Ctrl+C) after" in stderr_content)
+        env.assertTrue("secs, stopping threads..." in stderr_content)
+
+    # Check JSON output
+    json_filename = '{0}/mb.json'.format(config.results_dir)
+    env.assertTrue(os.path.isfile(json_filename))
+
+    with open(json_filename) as results_json:
+        results_dict = json.load(results_json)
+
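+    # Assumed rough shape of mb.json, inferred from the checks below (key names come
+    # from these assertions; bracketed values are placeholders, not real output):
+    #   {"ALL STATS": {"Runtime": {"Interrupted": "true", "Total duration": <ms>, ...},
+    #                  "Totals": {"Ops/sec": <float>, "Latency": <float>, "Count": <int>, ...}}}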
+    # Check that the Runtime section exists and has the Interrupted flag
+    env.assertTrue("ALL STATS" in results_dict)
+    env.assertTrue("Runtime" in results_dict["ALL STATS"])
+    runtime = results_dict["ALL STATS"]["Runtime"]
+
+    # Verify the interrupted flag is set to "true"
+    env.assertTrue("Interrupted" in runtime)
+    env.assertEqual(runtime["Interrupted"], "true")
+
+    # Verify the duration is reasonable (we interrupt after roughly 3 seconds)
+    env.assertTrue("Total duration" in runtime)
+    duration_ms = runtime["Total duration"]
+    env.assertTrue(duration_ms >= 2000)  # At least 2 seconds
+    env.assertTrue(duration_ms <= 5000)  # At most 5 seconds
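+    # (a loose 2-5 second window: SIGINT is sent ~3 s after launch, with slack for
+    # process startup before the run begins and for thread shutdown afterwards)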
+
+    # Verify that throughput metrics are NOT zero
+    totals_metrics = results_dict["ALL STATS"]["Totals"]
+
+    # Check ops/sec is not zero
+    env.assertTrue("Ops/sec" in totals_metrics)
+    total_ops_sec = totals_metrics["Ops/sec"]
+    env.assertTrue(total_ops_sec > 0)
+
+    # Check latency metrics are not zero
+    env.assertTrue("Latency" in totals_metrics)
+    total_latency = totals_metrics["Latency"]
+    env.assertTrue(total_latency > 0)
+
+    # Check that we actually processed some operations
+    env.assertTrue("Count" in totals_metrics)
+    total_count = totals_metrics["Count"]
+    env.assertTrue(total_count > 0)