#!/usr/bin/env python3
# ===--- Benchmark_DTrace.in ---------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//

import argparse
import os
import subprocess
import sys
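
# "@PATH_TO_DRIVER_LIBRARY@" is a build-time placeholder; the configure step
# that produces this script from its ".in" template is expected to substitute
# the actual driver-library path here.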
DRIVER_LIBRARY_PATH = "@PATH_TO_DRIVER_LIBRARY@"
sys.path.append(DRIVER_LIBRARY_PATH)
DTRACE_PATH = os.path.join(DRIVER_LIBRARY_PATH, "swift_stats.d")

import perf_test_driver  # noqa (E402 module level import not at top of file)

# Regexes for the XFAIL_LIST. Matches against '([Onone|O|Osize],TestName)'
XFAIL_LIST = []
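

# DTraceResult records the outcome of one benchmark run traced with DTrace:
# status is 1 when the observed retain/release counts scaled with the
# iteration count as expected, and output holds the counter values that
# print_data writes out.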
class DTraceResult(perf_test_driver.Result):
    def __init__(self, name, status, output, csv_output):
        perf_test_driver.Result.__init__(self, name, status, output, XFAIL_LIST)
        self.csv_output = csv_output

    def is_failure(self):
        return not bool(self.status)

    @classmethod
    def data_headers(cls):
        return ["Name", "Result", "Total RR Opts", "Total RR Opts/Iter"]

    @classmethod
    def data_format(cls, max_test_len):
        non_name_headers = DTraceResult.data_headers()[1:]
        fmt = ("{:<%d}" % (max_test_len + 5)) + "".join(
            ["{:<%d}" % (len(h) + 2) for h in non_name_headers]
        )
        return fmt

    @classmethod
    def print_data_header(cls, max_test_len, csv_output):
        headers = cls.data_headers()
        if csv_output:
            print(",".join(headers))
            return
        print(cls.data_format(max_test_len).format(*headers))

    def print_data(self, max_test_len):
        # map() returns an iterator in Python 3, so materialize it before
        # concatenating with the name/result columns.
        result = [self.get_name(), self.get_result()] + list(map(str, self.output))
        if self.csv_output:
            print(",".join(result))
            return

        print(DTraceResult.data_format(max_test_len).format(*result))
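

# DTraceBenchmarkDriver runs every benchmark at -O under the swift_stats.d
# DTrace script and reports a test as unstable when its retain/release counts
# do not scale linearly with the requested iteration count.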
class DTraceBenchmarkDriver(perf_test_driver.BenchmarkDriver):
    def __init__(self, binary, xfail_list, csv_output):
        perf_test_driver.BenchmarkDriver.__init__(
            self, binary, xfail_list, enable_parallel=True, opt_levels=["O"]
        )
        self.csv_output = csv_output

    def print_data_header(self, max_test_len):
        DTraceResult.print_data_header(max_test_len, self.csv_output)

    def prepare_input(self, name):
        return {}

    def process_input(self, data):
        test_name = "({}_{})".format(data["opt"], data["test_name"])
        print("Running {}...".format(test_name))
        sys.stdout.flush()
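
        # Run the benchmark binary under DTrace (via sudo) for the requested
        # iteration count and return the counter column from the lines that
        # follow the "DTRACE RESULTS" marker in the script's output.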
        def get_results_with_iters(iters):
            e = os.environ
            e["SWIFT_DETERMINISTIC_HASHING"] = "1"
            p = subprocess.Popen(
                [
                    "sudo",
                    "dtrace",
                    "-s",
                    DTRACE_PATH,
                    "-c",
                    "%s %s %s %s"
                    % (
                        data["path"],
                        data["test_name"],
                        "--num-iters=%d" % iters,
                        "--num-samples=2",
                    ),
                ],
                stdout=subprocess.PIPE,
                stderr=open("/dev/null", "w"),
                env=e,
                universal_newlines=True,
            )
            results = [x for x in p.communicate()[0].split("\n") if len(x) > 0]
            return [
                x.split(",")[1] for x in results[results.index("DTRACE RESULTS") + 1 :]
            ]

        iter_2_results = get_results_with_iters(2)
        iter_3_results = get_results_with_iters(3)
        iter_5_results = get_results_with_iters(5)
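
        # Differencing the runs isolates the per-iteration cost: 3 - 2
        # iterations gives the cost of one extra iteration, 5 - 3 the cost of
        # two, so the second delta should be exactly twice the first when the
        # counts are stable.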
        results = []
        foundInstability = False
        for x in zip(iter_2_results, iter_3_results, iter_5_results):
            result_2 = int(x[0])
            result_3 = int(x[1])
            result_5 = int(x[2])

            single_iter = result_3 - result_2
            two_iter = result_5 - result_3

            # We always do more work as the iteration count grows, so both
            # deltas should be non-negative. Fail if we see a negative number.
            if single_iter < 0 or two_iter < 0:
                foundInstability = True

            # Our retain traffic should always increase linearly with iteration
            # size, so the two-iteration delta must be double the
            # single-iteration delta.
            if (single_iter * 2) != two_iter:
                foundInstability = True

            results.append(result_3)
            results.append(single_iter)

        return DTraceResult(
            test_name, int(not foundInstability), results, self.csv_output
        )
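

# The configured script is expected to sit next to the benchmark binaries, so
# its own directory is handed to the driver as the binary directory.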
SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-filter",
        type=str,
        default=None,
        help="Filter out any test that does not match the given regex",
    )
    parser.add_argument(
        "--emit-csv",
        default=False,
        action="store_true",
        help="Emit csv output",
        dest="csv_output",
    )
    return parser.parse_args()
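

# Entry point: exits 0 when every benchmark's retain/release counts look
# stable, and non-zero otherwise.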
if __name__ == "__main__":
    args = parse_args()
    g = DTraceBenchmarkDriver(SWIFT_BIN_DIR, XFAIL_LIST, args.csv_output)
    if g.run(args.filter):
        sys.exit(0)
    else:
        sys.exit(-1)