#!/usr/bin/env python3
#
# Copyright 2019 The Dawn Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Based on Angle's perf_test_runner.py

import glob
import subprocess
import sys
import os
import re

# Assumes dawn_perf_tests was built inside a Chromium checkout:
# Dawn lives at /path/to/Chromium/src/third_party/dawn/ and the Chromium
# build output is usually at /path/to/Chromium/src/out/Release/.
# Change base_path if you want to run a dawn_perf_tests binary from a
# standalone Dawn build instead.
base_path = os.path.abspath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../'))
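# Typical invocation, run from the directory that contains the out/ build
# folder (the perftests_paths glob below is relative to the current working
# directory); the test name here is purely illustrative:
#   python3 <path/to/this/script> DrawCallPerf.Run/Vulkan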

# Look for a [Rr]elease build.
perftests_paths = glob.glob('out/*elease*')
metric = 'wall_time'
max_experiments = 10

binary_name = 'dawn_perf_tests'
if sys.platform == 'win32':
    binary_name += '.exe'

scores = []


def mean(data):
    """Return the sample arithmetic mean of data."""
    n = len(data)
    if n < 1:
        raise ValueError('mean requires at least one data point')
    return float(sum(data)) / float(n)  # in Python 2 use sum(data)/float(n)
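# For illustration: mean([2.0, 4.0, 6.0]) == 4.0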


def sum_of_square_deviations(data, c):
    """Return sum of square deviations of sequence data."""
    ss = sum((float(x) - c)**2 for x in data)
    return ss


def coefficient_of_variation(data):
    """Calculates the population coefficient of variation."""
    n = len(data)
    if n < 2:
        raise ValueError('variance requires at least two data points')
    c = mean(data)
    ss = sum_of_square_deviations(data, c)
    pvar = ss / n  # the population variance
    stddev = (pvar**0.5)  # population standard deviation
    return stddev / c
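# For illustration: coefficient_of_variation([2.0, 4.0]) is 1/3
# (mean 3.0, population standard deviation 1.0).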


def truncated_list(data, n):
    """Return data sorted, with the n smallest and n largest values removed."""
    if len(data) < n * 2:
        raise ValueError('list not large enough to truncate')
    return sorted(data)[n:-n]


def truncated_mean(data, n):
    """Compute the mean of data after truncating n values from each end."""
    return mean(truncated_list(data, n))


def truncated_cov(data, n):
    """Compute the coefficient of variation of data after truncating n values from each end."""
    return coefficient_of_variation(truncated_list(data, n))
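# For illustration: truncated_mean([1.0, 2.0, 3.0, 10.0, 100.0], 1) == 5.0
# (the single smallest and largest samples, 1.0 and 100.0, are dropped).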


# Find most recent binary
newest_binary = None
newest_mtime = None

for path in perftests_paths:
    binary_path = os.path.join(base_path, path, binary_name)
    if os.path.exists(binary_path):
        binary_mtime = os.path.getmtime(binary_path)
        if (newest_binary is None) or (binary_mtime > newest_mtime):
            newest_binary = binary_path
            newest_mtime = binary_mtime

perftests_path = newest_binary

if perftests_path is None or not os.path.exists(perftests_path):
    print('Cannot find Release %s!' % binary_name)
    sys.exit(1)

if len(sys.argv) >= 2:
    test_name = sys.argv[1]
else:
    # No default test name is defined in this script, so one must be passed
    # on the command line.
    print('Usage: %s <gtest_filter>' % sys.argv[0])
    sys.exit(1)

print('Using test executable: ' + perftests_path)
print('Test name: ' + test_name)


def get_results(metric, extra_args=[]):
    process = subprocess.Popen(
        [perftests_path, '--gtest_filter=' + test_name] + extra_args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    output, err = process.communicate()

    output_string = output.decode('utf-8')

    m = re.search(r"Running (\d+) tests", output_string)
    if m and int(m.group(1)) > 1:
        print("Found more than one test result in output:")
        print(output_string)
        sys.exit(3)

    pattern = metric + r'.*= ([0-9.]+)'
    m = re.findall(pattern, output_string)
    if not m:
        print("Did not find the metric '%s' in the test output:" % metric)
        print(output_string)
        sys.exit(1)

    return [float(value) for value in m]
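# Note: the pattern above matches metric lines of the form
# "<metric> ... = <number>" in the test output; the exact formatting of
# dawn_perf_tests result lines may differ between versions.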


# Calibrate the number of steps
steps = get_results("steps", ["--calibration"])[0]
print("running with %d steps." % steps)
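# The calibration run reports a "steps" value, and every measurement run below
# is pinned to that same step count via --override-steps so that the wall_time
# samples stay comparable across runs. (Flag semantics inferred from how this
# script uses them; consult the dawn_perf_tests harness for details.)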

# Loop 'max_experiments' times, running the tests.
for experiment in range(max_experiments):
    experiment_scores = get_results(metric, ["--override-steps", str(steps)])

    for score in experiment_scores:
        sys.stdout.write("%s: %.2f" % (metric, score))
        scores.append(score)

        if (len(scores) > 1):
            sys.stdout.write(", mean: %.2f" % mean(scores))
            sys.stdout.write(", variation: %.2f%%" %
                             (coefficient_of_variation(scores) * 100.0))

        if (len(scores) > 7):
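            # Trim the lowest and highest len(scores) >> 3 samples (roughly
            # 1/8th from each end) before computing the truncated statistics.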
            truncation_n = len(scores) >> 3
            sys.stdout.write(", truncated mean: %.2f" %
                             truncated_mean(scores, truncation_n))
            sys.stdout.write(", variation: %.2f%%" %
                             (truncated_cov(scores, truncation_n) * 100.0))

        print("")