Mirror of https://github.com/apple/swift.git (synced 2025-12-21 12:14:44 +01:00)
Bring new Python code in line with subset of PEP 8 used in project.
@@ -30,11 +30,11 @@ DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
def parse_results(res, optset):
    # Parse lines like this
    # #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
-   SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," + \
-                        ",".join([r"[ \t]*([\d.]+)"]*7))
+   SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
+                        ",".join([r"[ \t]*([\d.]+)"] * 7))
    # The Totals line would be parsed like this.
-   TOTALRE = re.compile(r"()(Totals)," + \
-                        ",".join([r"[ \t]*([\d.]+)"]*7))
+   TOTALRE = re.compile(r"()(Totals)," +
+                        ",".join([r"[ \t]*([\d.]+)"] * 7))
    KEYGROUP = 2
    VALGROUP = 4
    MEMGROUP = 9
@@ -51,14 +51,14 @@ def parse_results(res, optset):
        test = {}
        test['Data'] = [testresult]
        test['Info'] = {}
-       test['Name'] = "nts.swift/"+optset+"."+testname+".exec"
+       test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
        tests.append(test)
        if testname != 'Totals':
            mem_testresult = int(m.group(MEMGROUP))
            mem_test = {}
            mem_test['Data'] = [mem_testresult]
            mem_test['Info'] = {}
-           mem_test['Name'] = "nts.swift/mem_maxrss."+optset+"."+testname+".mem"
+           mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
            tests.append(mem_test)
    return tests
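For reference, a minimal sketch (not part of the commit) of how a score-parsing regex built this way matches one output line. The sample line and its values are invented for illustration, and the snippet uses Python 3 print() even though the driver itself is Python 2:

import re

# Same construction as SCORERE above: index, name, then seven numeric fields.
SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
                     ",".join([r"[ \t]*([\d.]+)"] * 7))

# Hypothetical benchmark output line (values are made up).
line = "12,Ackermann,20, 103, 110, 105, 2, 104, 655360"
m = SCORERE.match(line)
if m:
    # Group 2 is the test name (KEYGROUP), group 4 the MIN column (VALGROUP),
    # group 9 the peak-memory column (MEMGROUP).
    print(m.group(2), m.group(4), m.group(9))   # Ackermann 103 655360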
@@ -85,7 +85,7 @@ def instrument_test(driver_path, test, num_samples):
        )
        peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
                               test_output_raw.split('\n')[-15]).group(1)
-       test_outputs.append(test_output_raw.split()[1].split(',') + \
+       test_outputs.append(test_output_raw.split()[1].split(',') +
                            [peak_memory])

    # Average sample results
@@ -102,7 +102,7 @@ def instrument_test(driver_path, test, num_samples):
        for i in range(AVG_START_INDEX, len(test_output)):
            avg_test_output[i] += int(test_output[i])
    for i in range(AVG_START_INDEX, len(avg_test_output)):
-       avg_test_output[i] = int(round(avg_test_output[i] / \
+       avg_test_output[i] = int(round(avg_test_output[i] /
                                       float(len(test_outputs))))
    avg_test_output[NUM_SAMPLES_INDEX] = num_samples
    avg_test_output[MIN_INDEX] = min(test_outputs,
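As an aside, the averaging step above sums each numeric column across repeated runs and divides by the run count. A self-contained sketch of that idea, with invented sample rows and simplified column handling (not the driver's exact code):

# Hypothetical parsed samples: [#, name, samples, min, max, mean] per run.
runs = [
    [1, "X", 20, 100, 120, 110],
    [1, "X", 20, 104, 118, 108],
    [1, "X", 20,  98, 122, 112],
]

AVG_START_INDEX = 3          # average only the numeric columns after SAMPLES
avg = list(runs[0])          # start from the first run
for row in runs[1:]:
    for i in range(AVG_START_INDEX, len(row)):
        avg[i] += row[i]
for i in range(AVG_START_INDEX, len(avg)):
    avg[i] = int(round(avg[i] / float(len(runs))))

print(avg)   # [1, 'X', 20, 101, 120, 110]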
@@ -152,8 +152,8 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
    only run tests included in it."""
    (total_tests, total_min, total_max, total_mean) = (0, 0, 0, 0)
    output = []
-   headings = ['#', 'TEST','SAMPLES','MIN(μs)','MAX(μs)','MEAN(μs)','SD(μs)',
-               'MEDIAN(μs)','MAX_RSS(B)']
+   headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
+               'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
    line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
    if verbose and log_directory:
        print line_format.format(*headings)
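A quick illustration (not from the commit) of how a column format string like the one above lays out a header row and a data row; the widths match the driver's, the values are placeholders, and Python 3 print() is used:

line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
            'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
print(line_format.format(*headings))
print(line_format.format(1, 'Ackermann', 20, 103, 110, 105, 2, 104, 655360))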
@@ -182,7 +182,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
    totals_output = '\n\n' + ','.join(totals)
    if verbose:
        if log_directory:
-           print line_format.format(*(['']+totals))
+           print line_format.format(*([''] + totals))
        else:
            print totals_output[1:]
    formatted_output += totals_output
@@ -204,7 +204,7 @@ def submit(args):
    print "\nRunning benchmarks..."
    for optset in args.optimization:
        print "Opt level:\t", optset
-       file = os.path.join(args.tests, "Benchmark_"+optset)
+       file = os.path.join(args.tests, "Benchmark_" + optset)
        try:
            res = run_benchmarks(file, benchmarks=args.benchmark,
                                 num_samples=args.iterations)
@@ -227,7 +227,7 @@ def submit(args):

def run(args):
    optset = args.optimization
-   file = os.path.join(args.tests, "Benchmark_"+optset)
+   file = os.path.join(args.tests, "Benchmark_" + optset)
    run_benchmarks(file, benchmarks=args.benchmarks,
                   num_samples=args.iterations, verbose=True,
                   log_directory=args.output_dir,
@@ -260,10 +260,9 @@ def compare(args):
    recent_logs = {}
    for branch_dir in [current_branch_dir, master_branch_dir]:
        for opt in ['O', 'Onone']:
-           recent_logs[os.path.basename(branch_dir) + '_' + opt] = \
-               sorted(glob.glob(os.path.join(branch_dir,
-               'Benchmark_' + opt + '-*.log')), key=os.path.getctime,
-               reverse=True)
+           recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
+               glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
+               key=os.path.getctime, reverse=True)

    if current_branch == 'master':
        if len(recent_logs['master_O']) > 1 and \
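For context, a small sketch (independent of the commit) of the "newest log first" pattern used above: glob for matching files and sort by change/creation time, most recent first. The directory and file names here are hypothetical:

import glob
import os

def newest_logs(branch_dir, opt):
    # e.g. branch_dir='logs/master', opt='O' -> logs/master/Benchmark_O-*.log
    pattern = os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')
    # os.path.getctime gives the creation/metadata-change time;
    # reverse=True puts the most recent log at index 0.
    return sorted(glob.glob(pattern), key=os.path.getctime, reverse=True)

# Hypothetical usage: compare the two most recent -O logs on master.
logs = newest_logs('logs/master', 'O')
if len(logs) > 1:
    print(logs[0], logs[1])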
@@ -323,7 +322,7 @@ def main():
    submit_parser = subparsers.add_parser('submit',
        help='run benchmarks and submit results to LNT')
    submit_parser.add_argument('-t', '--tests',
-       help='directory containing Benchmark_O{,none,unchecked} ' + \
+       help='directory containing Benchmark_O{,none,unchecked} ' +
            '(default: DRIVER_DIR)',
        default=DRIVER_DIR)
    submit_parser.add_argument('-m', '--machine', required=True,
@@ -345,7 +344,7 @@ def main():
    run_parser = subparsers.add_parser('run',
        help='run benchmarks and output results to stdout')
    run_parser.add_argument('-t', '--tests',
-       help='directory containing Benchmark_O{,none,unchecked} ' + \
+       help='directory containing Benchmark_O{,none,unchecked} ' +
            '(default: DRIVER_DIR)',
        default=DRIVER_DIR)
    run_parser.add_argument('-i', '--iterations',
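As background, a minimal argparse sketch (not the driver's actual CLI definition) showing the subcommand-plus---tests-default pattern used by the 'submit' and 'run' parsers above; DRIVER_DIR stands in for the script's own directory, and the parsed arguments are fabricated for the demo:

import argparse
import os

DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))

parser = argparse.ArgumentParser(description='benchmark driver (sketch)')
subparsers = parser.add_subparsers(dest='command')

run_parser = subparsers.add_parser(
    'run', help='run benchmarks and output results to stdout')
run_parser.add_argument('-t', '--tests',
                        help='directory containing Benchmark_O{,none,unchecked} '
                             '(default: DRIVER_DIR)',
                        default=DRIVER_DIR)
run_parser.add_argument('-i', '--iterations', type=int, default=10)

args = parser.parse_args(['run', '-i', '3'])
print(args.command, args.tests, args.iterations)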
@@ -44,8 +44,7 @@ class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):
        print "Running {}...".format(test_name)
        sys.stdout.flush()
        status = subprocess.call([data['path'], data['test_name'], '--num-iters=2'],
-                                env=data['env'],
-                                stderr=open('/dev/null', 'w'),
+                                env=data['env'], stderr=open('/dev/null', 'w'),
                                 stdout=open('/dev/null', 'w'))
        return GuardMallocResult(test_name, status)
@@ -57,4 +56,3 @@ if __name__ == "__main__":
        sys.exit(0)
    else:
        sys.exit(-1)
-
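For reference, a hedged sketch (not the driver's code) of the subprocess.call pattern above: run a child process with a customized environment and discard its output. The command is a placeholder and the environment variable is used only as a Guard Malloc-style example; on Python 3 one would normally prefer subprocess.DEVNULL:

import os
import subprocess
import sys

env = dict(os.environ)
env['MallocGuardEdges'] = '1'   # hypothetical Guard Malloc-style setting

with open(os.devnull, 'w') as devnull:
    # subprocess.call returns the child's exit status (0 on success).
    status = subprocess.call([sys.executable, '-c', 'pass'],   # placeholder command
                             env=env, stdout=devnull, stderr=devnull)
print(status)   # 0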
@@ -13,7 +13,6 @@
# ===----------------------------------------------------------------------===//

import os
import re
import sys
import json
import subprocess
@@ -64,6 +63,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
    def __init__(self, binary, xfail_list):
        perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
                                                   enable_parallel=True)

    def prepare_input(self, name):
        return {}
@@ -75,8 +75,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
            p = subprocess.Popen([data['path'], "--run-all", "--num-samples=2",
                                  "--num-iters={}".format(2), data['test_name']],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            status = p.wait()
            output = p.stdout.readlines()
            p.wait()
            error_out = p.stderr.readlines()
        except:
            print("Child Process Failed! (%s,%s)" % (data['path'], data['test_name']))
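As an aside, a minimal sketch (not the runner's code) of the Popen-with-pipes pattern above: capture stdout and stderr, read both streams, then wait for the exit status. The child command here is a placeholder that just writes one line to each stream:

import subprocess
import sys

p = subprocess.Popen(
    [sys.executable, '-c',
     'import sys; print("out"); sys.stderr.write("err\\n")'],
    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stdout.readlines()
error_out = p.stderr.readlines()
status = p.wait()          # reading before waiting avoids a pipe deadlock
print(status, output, error_out)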
@@ -19,27 +19,26 @@
# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | column -s, -t

import sys
import os
import re

-VERBOSE=0
+VERBOSE = 0

# #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
-SCORERE=re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
-TOTALRE=re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
-KEYGROUP=2
-VALGROUP=4
-NUMGROUP=1
+SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+KEYGROUP = 2
+VALGROUP = 4
+NUMGROUP = 1

-IsTime=1
-ShowSpeedup=1
-PrintAllScores=0
+IsTime = 1
+ShowSpeedup = 1
+PrintAllScores = 0

def parseInt(word):
    try:
        return int(word)
    except:
-       raise ScoreParserException("Expected integer value, not "+word)
+       raise Exception("Expected integer value, not " + word)

def getScores(fname):
    scores = {}
@@ -48,7 +47,8 @@ def getScores(fname):
    f = open(fname)
    try:
        for line in f:
-           if VERBOSE: print "Parsing", line,
+           if VERBOSE:
+               print "Parsing", line,
            m = SCORERE.match(line)
            is_total = False
            if not m:
@@ -57,7 +57,8 @@ def getScores(fname):
            if not m:
                continue

-           if VERBOSE: print " match", m.group(KEYGROUP), m.group(VALGROUP)
+           if VERBOSE:
+               print " match", m.group(KEYGROUP), m.group(VALGROUP)

            if not m.group(KEYGROUP) in scores:
                scores[m.group(KEYGROUP)] = []
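To illustrate the accumulation this parser performs, a compact sketch (with invented input lines and plain float() instead of the script's parse helpers) of building the per-test score lists and tracking the largest number of runs seen:

import re

SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
KEYGROUP, VALGROUP = 2, 4

# Hypothetical log contents: the same test reported by two runs.
lines = [
    "1,Ackermann, 20, 103",
    "2,DictTest, 20, 250",
    "1,Ackermann, 20, 99",
]

scores, runs = {}, 0
for line in lines:
    m = SCORERE.match(line)
    if not m:
        continue
    scores.setdefault(m.group(KEYGROUP), []).append(float(m.group(VALGROUP)))
    runs = max(runs, len(scores[m.group(KEYGROUP)]))

print(scores, runs)   # {'Ackermann': [103.0, 99.0], 'DictTest': [250.0]} 2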
@@ -90,31 +91,34 @@ def compareScores(key, score1, score2, runs, num):
            bestscore1 = score
        if isMaxScore(newscore=score, maxscore=worstscore1, invert=minworst):
            worstscore1 = score
-       if PrintAllScores: print ("%d" % score).rjust(16),
+       if PrintAllScores:
+           print ("%d" % score).rjust(16),
    for score in score2:
        if isMaxScore(newscore=score, maxscore=bestscore2, invert=minbest):
            bestscore2 = score
        if isMaxScore(newscore=score, maxscore=worstscore2, invert=minworst):
            worstscore2 = score
-       if PrintAllScores: print ("%d" % score).rjust(16),
+       if PrintAllScores:
+           print ("%d" % score).rjust(16),
        r += 1
    while r < runs:
-       if PrintAllScores: print ("0").rjust(9),
+       if PrintAllScores:
+           print ("0").rjust(9),
        r += 1

    if not PrintAllScores:
        print ("%d" % bestscore1).rjust(16),
        print ("%d" % bestscore2).rjust(16),

-       print ("%+d" % (bestscore2-bestscore1)).rjust(9),
+       print ("%+d" % (bestscore2 - bestscore1)).rjust(9),

        if bestscore1 != 0 and bestscore2 != 0:
-           print ("%+.1f%%"%(((float(bestscore2)/bestscore1)-1)*100)).rjust(9),
+           print ("%+.1f%%" % (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9),
            if ShowSpeedup:
                Num, Den = float(bestscore2), float(bestscore1)
                if IsTime:
                    Num, Den = Den, Num
-               print ("%.2fx"%(Num/Den)).rjust(9),
+               print ("%.2fx" % (Num / Den)).rjust(9),
        else:
            print "*".rjust(9),
            if ShowSpeedup:
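To make the arithmetic above concrete, a small worked sketch (with invented numbers) of the DELTA, %DELTA and SPEEDUP columns for a time-based metric, where smaller is better so the speedup is old/new:

best_old, best_new = 120, 100          # hypothetical best MIN times in microseconds

delta = best_new - best_old            # -20
pct_delta = ((float(best_new) / best_old) - 1) * 100   # about -16.7

# For time metrics (IsTime), speedup is old/new: 1.2x means the new run is faster.
num, den = float(best_new), float(best_old)
num, den = den, num
speedup = num / den                    # 1.2

print("%+d" % delta, "%+.1f%%" % pct_delta, "%.2fx" % speedup)   # -20 -16.7% 1.20x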
@@ -165,21 +169,25 @@ if __name__ == '__main__':
    if runs2 > runs:
        runs = runs2

-   if VERBOSE: print scores1; print scores2
+   if VERBOSE:
+       print scores1
+       print scores2

    keys = [f for f in set(scores1.keys() + scores2.keys())]
    keys.sort()
    if VERBOSE:
        print "comparing ", file1, "vs", file2, "=",
-       if IsTime: print file1, "/", file2
-       else: print file2, "/", file1
+       if IsTime:
+           print file1, "/", file2
+       else:
+           print file2, "/", file1

    print "#".rjust(3),
    print "TEST".ljust(25),
    if PrintAllScores:
-       for i in range(0,runs):
+       for i in range(0, runs):
            print ("OLD_RUN%d" % i).rjust(9),
-       for i in range(0,runs):
+       for i in range(0, runs):
            print ("NEW_RUN%d" % i).rjust(9),
    else:
        print "BEST_OLD_MIN(μs)".rjust(17),
@@ -187,10 +195,10 @@ if __name__ == '__main__':
    print 'DELTA'.rjust(9), '%DELTA'.rjust(9), 'SPEEDUP'.rjust(9)

    for key in keys:
-       if not key in scores1:
+       if key not in scores1:
            print key, "not in", file1
            continue
-       if not key in scores2:
+       if key not in scores2:
            print key, "not in", file2
            continue
        compareScores(key, scores1[key], scores2[key], runs, nums[key])
@@ -62,6 +62,7 @@ if __name__ == '__main__':
        content = open(filepath).read()
        matches = re.findall(r'func run_(.*?)\(', content)
        return filter(lambda x: x not in ignored_run_funcs, matches)
+
    def find_run_funcs(dirs):
        ret_run_funcs = []
        for d in dirs:
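For context, a short sketch (with inline sample text rather than the harness generator's real input, and a list comprehension standing in for the filter(lambda ...) call) of how a run_ function scan like the one above works:

import re

# Hypothetical Swift source scanned by the generator.
content = """
public func run_Ackermann(N: Int) {}
public func run_StringWalk(N: Int) {}
public func helper() {}
"""

ignored_run_funcs = ['StringWalk']           # hypothetical ignore list
matches = re.findall(r'func run_(.*?)\(', content)
run_funcs = [m for m in matches if m not in ignored_run_funcs]
print(run_funcs)   # ['Ackermann']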
@@ -87,4 +88,3 @@ if __name__ == '__main__':
            imports=imports,
            run_funcs=run_funcs)
        )
-
@@ -13,7 +13,6 @@
# ===----------------------------------------------------------------------===//

import os
import sys
import subprocess
import multiprocessing
import re
@@ -85,7 +84,7 @@ class BenchmarkDriver(object):
        results = None
        if self.enable_parallel:
            p = multiprocessing.Pool()
-           z = zip([self]*len(prepared_input), prepared_input)
+           z = zip([self] * len(prepared_input), prepared_input)
            results = p.map(_unwrap_self, z)
        else:
            results = map(self.process_input, prepared_input)
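As background, a self-contained sketch (independent of perf_test_driver, with a toy Driver class) of the pattern above: bound methods don't pickle cleanly on Python 2, so each work item is paired with the driver instance via zip([self] * n, inputs) and dispatched through a module-level trampoline function:

import multiprocessing


def _unwrap_self(args):
    # Trampoline: unpack (instance, input) and call the bound method.
    obj, data = args
    return obj.process_input(data)


class Driver(object):
    def process_input(self, data):
        return data * data

    def run(self, inputs):
        p = multiprocessing.Pool()
        z = zip([self] * len(inputs), inputs)
        try:
            return p.map(_unwrap_self, list(z))
        finally:
            p.close()
            p.join()


if __name__ == '__main__':
    print(Driver().run([1, 2, 3]))   # [1, 4, 9]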
@@ -112,4 +111,3 @@ class BenchmarkDriver(object):
        has_failure = reduce(max, [d['has_failure']for d in self.data])
        self.print_data(self.data, max_test_len)
        return not has_failure
-
@@ -62,32 +62,12 @@ import json
import re
# Parse lines like this
# #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
-SCORERE=re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")

# The Totals line would be parsed like this.
-TOTALRE=re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
-KEYGROUP=2
-VALGROUP=4
-
-def getScores(fname):
-    scores = {}
-    runs = 0
-    f = open(fname)
-    try:
-        for line in f:
-            if VERBOSE: print "Parsing", line,
-            m = SCORERE.match(line)
-            if not m:
-                continue
-
-            if not m.group(KEYGROUP) in scores:
-                scores[m.group(KEYGROUP)] = []
-            scores[m.group(KEYGROUP)].append(parseFloat(m.group(VALGROUP)))
-            if len(scores[m.group(KEYGROUP)]) > runs:
-                runs = len(scores[m.group(KEYGROUP)])
-    finally:
-        f.close()
-    return scores, runs
+TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
+KEYGROUP = 2
+VALGROUP = 4

if __name__ == "__main__":
    data = {}