#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
The JS Shell Test Harness.

See the adjacent README.txt for more details.
"""

from __future__ import print_function

import os, sys, textwrap, platform
from os.path import abspath, dirname, isfile, realpath
from contextlib import contextmanager
from copy import copy
from subprocess import list2cmdline, call

from lib.tests import RefTestCase, get_jitflags, get_cpu_count, \
    get_environment_overlay, change_env
from lib.results import ResultsSink
from lib.progressbar import ProgressBar
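
# The parallel task runner is platform specific; both modules expose the same
# run_all_tests() entry point, so the choice below is transparent to callers.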
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
    from lib.tasks_unix import run_all_tests
else:
    from lib.tasks_win import run_all_tests


@contextmanager
def changedir(dirname):
    pwd = os.getcwd()
    os.chdir(dirname)
    try:
        yield
    finally:
        os.chdir(pwd)


def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The raw OptionParser output.
        prefix :list<str>: The command prefix used to launch the shell,
                           including shell args and any debugger wrapper.
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime'
                       ' (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls",
                             "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int,
                          default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel'
                               ' (default %default)')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set the maximum time a test is allowed to run'
                               ' (in seconds).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--jitflags', dest='jitflags', default='none',
                          type='string',
                          help='IonMonkey option combinations. One of all,'
                               ' debug, ion, and none (default %default).')
    harness_og.add_option('--tbpl', action='store_true',
                          help='Run each test in all of the TBPL jit-flag'
                               ' configurations.')
    harness_og.add_option('--tbpl-debug', action='store_true',
                          help='Run each test in the faster subset of TBPL'
                               ' jit-flag configurations.')
    harness_og.add_option('-g', '--debug', action='store_true',
                          help='Run a test in a debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true',
                          help='Run under the JS debugger (jorendb).')
    harness_og.add_option('--passthrough', action='store_true',
                          help='Run tests with stdin/stdout attached to'
                               ' the caller.')
    harness_og.add_option('--test-reflect-stringify',
                          dest='test_reflect_stringify',
                          help='Instead of running tests, use them to test the'
                               ' Reflect.stringify code in the specified file.')
    harness_og.add_option('--valgrind', action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='',
                          help='Extra args to pass to valgrind.')
    harness_og.add_option('--rr', action='store_true',
                          help='Run tests under the rr record-and-replay'
                               ' debugger.')
    harness_og.add_option('-C', '--check-output', action='store_true',
                          help='Run tests to check output for different'
                               ' jit-flags.')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('-d', '--exclude-random', dest='random',
                        action='store_false',
                        help='Exclude tests marked as "random".')
    input_og.add_option('--run-skipped', action='store_true',
                        help='Run tests marked as "skip".')
    input_og.add_option('--run-only-skipped', action='store_true',
                        help='Run only tests marked as "skip".')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow".')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5'
                             ' standard.')
    input_og.add_option('--repeat', type=int, default=1,
                        help='Repeat tests the given number of times.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output",
                            "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show the exact command line used to run each'
                              ' test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by"
                              ' --output-file.')
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help='If a --show-* option is given, only print'
                              ' output for failed tests.')
    output_og.add_option('--no-show-failed', action='store_true',
                         help="Don't print output for failed tests"
                              ' (no-op with --show-output).')
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file'
                              ' (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write a list of tests that did not pass to the'
                              ' given file.')
    output_og.add_option('--no-progress', dest='hide_progress',
                         action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', dest='format', action='store_const',
                         const='automation',
                         help='Use automation-parseable output format.')
    output_og.add_option('--format', dest='format', default='none',
                         type='choice', choices=['automation', 'none'],
                         help='Output format. Either automation or none'
                              ' (default %default).')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special",
                             "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)

    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set()
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, [options.valgrind, options.debug, options.rr])) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    if options.debug:
        if options.debugger == 'lldb':
            debugger_prefix = ['lldb', '--']
        else:
            debugger_prefix = options.debugger.split()
    else:
        debugger_prefix = []

    if options.valgrind:
        debugger_prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            debugger_prefix.append('--dsymutil=yes')
        options.show_output = True
    if options.rr:
        debugger_prefix = ['rr', 'record']

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(
            abspath(dirname(abspath(__file__))),
            '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
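
    # Build the fixed command prefix (shell binary, shell args, and any
    # debugger wrapper); each test's own arguments are appended to it later.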
    prefix = RefTestCase.build_js_cmd_prefix(options.js_shell, js_cmd_args,
                                             debugger_prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            with open(test_file) as fp:
                requested_paths |= set(line.strip() for line in fp)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    excluded_paths = set()
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, 'r') as fp:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.format == 'automation' or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, prefix, requested_paths, excluded_paths)


def load_tests(options, requested_paths, excluded_paths):
    """
    Returns a tuple: (test_count, test_gen)
        test_count: [int] Number of tests that will be in test_gen.
        test_gen: [iterable<Test>] Tests found that should be run.
    """
    import lib.manifest as manifest

    if options.js_shell is None:
        xul_tester = manifest.NullXULInfoTester()
    else:
        if options.xul_info_src is None:
            xul_info = manifest.XULInfo.create(options.js_shell)
        else:
            xul_abi, xul_os, xul_debug = options.xul_info_src.split(':')
            xul_debug = xul_debug.lower() == 'true'
            xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
        xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)

    test_dir = dirname(abspath(__file__))
    test_count = manifest.count_tests(test_dir, requested_paths, excluded_paths)
    test_gen = manifest.load_reftests(test_dir, requested_paths, excluded_paths,
                                      xul_tester)

    if options.test_reflect_stringify is not None:
        def trs_gen(tests):
            for test in tests:
                test.test_reflect_stringify = options.test_reflect_stringify
                # Even if the test is not normally expected to pass, we still
                # expect reflect-stringify to be able to handle it.
                test.expect = True
                test.random = False
                test.slow = False
                yield test
        test_gen = trs_gen(test_gen)

    if options.make_manifests:
        manifest.make_manifests(options.make_manifests, test_gen)
        sys.exit()

    # Create a new test list. Apply each TBPL configuration to every test.
    flags_list = None
    if options.tbpl:
        flags_list = get_jitflags('all')
    elif options.tbpl_debug:
        flags_list = get_jitflags('debug')
    else:
        flags_list = get_jitflags(options.jitflags, none=None)

    if flags_list:
        def flag_gen(tests):
            for test in tests:
                for jitflags in flags_list:
                    tmp_test = copy(test)
                    tmp_test.jitflags = copy(test.jitflags)
                    tmp_test.jitflags.extend(jitflags)
                    yield tmp_test
        test_count = test_count * len(flags_list)
        test_gen = flag_gen(test_gen)
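
    # The remaining options filter the test stream with generator expressions,
    # so tests that are excluded here are simply never yielded to the runner.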
    if options.test_file:
        paths = set()
        for test_file in options.test_file:
            with open(test_file) as fp:
                paths |= set(line.strip() for line in fp)
        test_gen = (_ for _ in test_gen if _.path in paths)

    if options.no_extensions:
        pattern = os.sep + 'extensions' + os.sep
        test_gen = (_ for _ in test_gen if pattern not in _.path)

    if not options.random:
        test_gen = (_ for _ in test_gen if not _.random)

    if options.run_only_skipped:
        options.run_skipped = True
        test_gen = (_ for _ in test_gen if not _.enable)

    if not options.run_slow_tests:
        test_gen = (_ for _ in test_gen if not _.slow)

    if options.repeat:
        test_gen = (test for test in test_gen for i in range(options.repeat))
        test_count *= options.repeat

    return test_count, test_gen


def main():
    options, prefix, requested_paths, excluded_paths = parse_args()
    if options.js_shell is not None and not (isfile(options.js_shell) and
                                             os.access(options.js_shell, os.X_OK)):
        if (platform.system() != 'Windows' or
                isfile(options.js_shell) or not
                isfile(options.js_shell + ".exe") or not
                os.access(options.js_shell + ".exe", os.X_OK)):
            print('Could not find executable shell: ' + options.js_shell)
            return 1

    test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
    test_environment = get_environment_overlay(options.js_shell)

    if test_count == 0:
        print('no tests selected')
        return 1

    test_dir = dirname(abspath(__file__))

    if options.debug:
        if test_count > 1:
            print('Multiple tests match command line arguments,'
                  ' debugger can only run one')
            for tc in test_gen:
                print(' {}'.format(tc.path))
            return 2

        cmd = next(test_gen).get_command(prefix)
        if options.show_cmd:
            print(list2cmdline(cmd))
        with changedir(test_dir), change_env(test_environment):
            call(cmd)
        return 0
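
    # Normal (non-debugger) path: run every selected test through the parallel
    # task runner and stream each result into the ResultsSink as it completes.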
    with changedir(test_dir), change_env(test_environment):
        results = ResultsSink(options, test_count)
        try:
            for out in run_all_tests(test_gen, prefix, results.pb, options):
                results.push(out)
            results.finish(True)
        except KeyboardInterrupt:
            results.finish(False)

        return 0 if results.all_passed() else 1

    return 0


if __name__ == '__main__':
    sys.exit(main())