diff --git a/benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake b/benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake
index 1f84bb829d2..2ca6532648d 100644
--- a/benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake
+++ b/benchmark/cmake/modules/AddSwiftBenchmarkSuite.cmake
@@ -701,11 +701,11 @@ function(swift_benchmark_compile)
           COMMAND "${swift-bin-dir}/Benchmark_Driver" "run"
                   "-o" "O" "--output-dir" "${CMAKE_CURRENT_BINARY_DIR}/logs"
                   "--swift-repo" "${SWIFT_SOURCE_DIR}"
-                  "--iterations" "${SWIFT_BENCHMARK_NUM_O_ITERATIONS}"
+                  "--independent-samples" "${SWIFT_BENCHMARK_NUM_O_ITERATIONS}"
           COMMAND "${swift-bin-dir}/Benchmark_Driver" "run"
                   "-o" "Onone" "--output-dir" "${CMAKE_CURRENT_BINARY_DIR}/logs"
                   "--swift-repo" "${SWIFT_SOURCE_DIR}"
-                  "--iterations" "${SWIFT_BENCHMARK_NUM_ONONE_ITERATIONS}"
+                  "--independent-samples" "${SWIFT_BENCHMARK_NUM_ONONE_ITERATIONS}"
           COMMAND "${swift-bin-dir}/Benchmark_Driver" "compare"
                   "--log-dir" "${CMAKE_CURRENT_BINARY_DIR}/logs"
                   "--swift-repo" "${SWIFT_SOURCE_DIR}"
diff --git a/benchmark/scripts/Benchmark_Driver b/benchmark/scripts/Benchmark_Driver
index adf182b4d0c..3812ae4029b 100755
--- a/benchmark/scripts/Benchmark_Driver
+++ b/benchmark/scripts/Benchmark_Driver
@@ -167,7 +167,7 @@ class BenchmarkDriver(object):
 
         return reduce(merge_results,
                       [self.run(test, measure_memory=True)
-                       for _ in range(self.args.iterations)])
+                       for _ in range(self.args.independent_samples)])
 
     def log_results(self, output, log_file=None):
         """Log output to `log_file`.
@@ -603,7 +603,7 @@ def parse_args(args):
         help='Run benchmarks and output results to stdout',
         parents=[shared_benchmarks_parser])
     run_parser.add_argument(
-        '-i', '--iterations',
+        '-i', '--independent-samples',
         help='number of times to run each test (default: 1)',
         type=positive_int, default=1)
     run_parser.add_argument(
diff --git a/benchmark/scripts/test_Benchmark_Driver.py b/benchmark/scripts/test_Benchmark_Driver.py
index c4dd29192c0..c35e8093fd6 100644
--- a/benchmark/scripts/test_Benchmark_Driver.py
+++ b/benchmark/scripts/test_Benchmark_Driver.py
@@ -96,15 +96,16 @@ class Test_parse_args(unittest.TestCase):
              "(choose from 'O', 'Onone', 'Osize')"],
             err.getvalue())
 
-    def test_iterations(self):
-        self.assertEquals(parse_args(['run']).iterations, 1)
-        self.assertEquals(parse_args(['run', '-i', '3']).iterations, 3)
+    def test_independent_samples(self):
+        self.assertEquals(parse_args(['run']).independent_samples, 1)
+        self.assertEquals(parse_args(['run', '-i', '3']).independent_samples,
+                          3)
         with captured_output() as (out, err):
             self.assertRaises(SystemExit, parse_args, ['run', '-i', '-3'])
         self.assert_contains(
-            ['error:',
-             "argument -i/--iterations: invalid positive_int value: '-3'"],
+            ['error:', "argument -i/--independent-samples: " +
+             "invalid positive_int value: '-3'"],
             err.getvalue())
 
     def test_output_dir(self):
@@ -279,7 +280,7 @@ class TestBenchmarkDriverRunningTests(unittest.TestCase):
             ('/benchmarks/Benchmark_O', 'b', '--memory'))
 
     def test_run_benchmark_independent_samples(self):
-        self.driver.args.iterations = 3
+        self.driver.args.independent_samples = 3
         r = self.driver.run_independent_samples('b1')
         self.assertEquals(self.subprocess_mock.calls.count(
             ('/benchmarks/Benchmark_O', 'b1', '--memory')), 3)
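Note (not part of the patch): the rename only touches the option string; argparse derives the attribute name independent_samples from --independent-samples, which is why self.args.independent_samples works without an explicit dest=. Below is a minimal, self-contained sketch of that behavior. The parser construction here is simplified (the real parser is built in parse_args with shared parent parsers), and the body of positive_int is assumed; only its name and the "invalid positive_int value" error text are visible in the patch.

    import argparse


    def positive_int(value):
        # Assumed validator body: raising ValueError makes argparse report
        # "argument -i/--independent-samples: invalid positive_int value: '...'",
        # which is the message the updated test expects.
        ivalue = int(value)
        if ivalue <= 0:
            raise ValueError
        return ivalue


    parser = argparse.ArgumentParser(prog='Benchmark_Driver')
    subparsers = parser.add_subparsers()
    run_parser = subparsers.add_parser('run')
    run_parser.add_argument(
        '-i', '--independent-samples',
        help='number of times to run each test (default: 1)',
        type=positive_int, default=1)

    args = parser.parse_args(['run', '-i', '3'])
    print(args.independent_samples)  # prints 3; the dash maps to an underscore

The same mapping is what the CMake change relies on: passing "--independent-samples" "${SWIFT_BENCHMARK_NUM_O_ITERATIONS}" on the Benchmark_Driver command line ends up as args.independent_samples inside run_independent_samples.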