summaryrefslogtreecommitdiffstats
path: root/js/src/tests/lib/tests.py
blob: 1de56dc1c4d8f4500fe9ff4eb304c92197e79521 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
# Library for JSTest tests.
#
# This contains classes that represent an individual test, including
# metadata, and know how to run the tests and determine failures.

import datetime, os, sys, time
from contextlib import contextmanager
from subprocess import Popen, PIPE
from threading import Thread

from results import TestOutput

# When run on tbpl, we run each test multiple times with the following
# arguments.
# Maps a variant name to a list of flag sets; the harness runs each test once
# per flag set, so e.g. 'all' runs every test five times.
JITFLAGS = {
    'all': [
        [], # no flags, normal baseline and ion
        ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
        ['--ion-eager', '--ion-offthread-compile=off', '--non-writable-jitcode',
         '--ion-check-range-analysis', '--ion-extra-checks', '--no-sse3', '--no-threads'],
        ['--baseline-eager'],
        ['--no-baseline', '--no-ion'],
    ],
    # used by jit_test.py
    'ion': [
        ['--baseline-eager'],
        ['--ion-eager', '--ion-offthread-compile=off']
    ],
    # Run reduced variants on debug builds, since they take longer time.
    'debug': [
        [], # no flags, normal baseline and ion
        ['--ion-eager', '--ion-offthread-compile=off'], # implies --baseline-eager
        ['--baseline-eager'],
    ],
    # Interpreter-only, for tools that cannot handle binary code generation.
    'interp': [
        ['--no-baseline', '--no-asmjs', '--no-wasm', '--no-native-regexp']
    ],
    'none': [
        [] # no flags, normal baseline and ion
    ]
}

def get_jitflags(variant, **kwargs):
    """Return the list of flag sets for the named JITFLAGS variant.

    Exits the process with status 1 when *variant* is unknown.  For the
    'none' variant, a caller-supplied override may be passed via the
    keyword argument 'none'.
    """
    try:
        flag_sets = JITFLAGS[variant]
    except KeyError:
        print('Invalid jitflag: "{}"'.format(variant))
        sys.exit(1)
    # 'none' is special-cased so callers can substitute their own flags.
    if variant == 'none' and 'none' in kwargs:
        return kwargs['none']
    return flag_sets

def valid_jitflags():
    """Return the recognized JITFLAGS variant names."""
    return JITFLAGS.keys()

def get_environment_overlay(js_shell):
    """Return extra environment variables needed to run tests successfully.

    *js_shell* is the path to the shell binary; its containing directory is
    added to the platform's dynamic-library search path so the freshly built
    nspr and icu are found instead of platform-supplied copies (or none at
    all on Windows).
    """
    shell_dir = os.path.dirname(js_shell)
    overlay = {
        # Pin the time zone so Date tests behave deterministically.
        'TZ': 'PST8PDT',
        # Keep date strings in English.
        'LC_TIME': 'en_US.UTF-8',
        # Suppress crash dialogs from the shell on Windows.
        'XRE_NO_WINDOWS_CRASH_DIALOG': '1',
    }
    # Pick the platform-appropriate library search path variable, if any.
    library_path_var = None
    if sys.platform.startswith('linux'):
        library_path_var = 'LD_LIBRARY_PATH'
    elif sys.platform.startswith('darwin'):
        library_path_var = 'DYLD_LIBRARY_PATH'
    elif sys.platform.startswith('win'):
        library_path_var = 'PATH'
    if library_path_var is not None:
        overlay[library_path_var] = shell_dir
    return overlay


@contextmanager
def change_env(env_overlay):
    """Temporarily apply *env_overlay* to os.environ.

    Keys whose name contains 'PATH' and that are already set are prepended
    to (separated by os.pathsep) rather than replaced.  The prior state is
    restored on exit, even when the body raises.
    """
    # Snapshot the current value of every key we are about to touch;
    # None marks a variable that was not previously set.
    saved = dict((key, os.environ.get(key)) for key in env_overlay)

    for key, val in env_overlay.items():
        current = os.environ.get(key)
        if 'PATH' in key and current is not None:
            # Prepend so our entry wins without hiding inherited entries.
            os.environ[key] = val + os.pathsep + current
        else:
            os.environ[key] = val

    try:
        yield
    finally:
        # Undo the overlay: restore saved values, drop keys we introduced.
        for key, prior in saved.items():
            if prior is None:
                del os.environ[key]
            else:
                os.environ[key] = prior


def get_cpu_count():
    """Guess a reasonable parallelism count for this machine (minimum 1)."""
    # Preferred source: the multiprocessing module (Python 2.6+).
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass

    # POSIX fallback.
    try:
        count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
    except (AttributeError, ValueError):
        count = 0
    if count > 0:
        return count

    # Windows fallback.
    try:
        count = int(os.environ['NUMBER_OF_PROCESSORS'])
    except (KeyError, ValueError):
        count = 0
    if count > 0:
        return count

    # Nothing worked; assume a single processor.
    return 1


class RefTest(object):
    """A runnable test: a JS file plus the shell options needed to run it."""

    def __init__(self, path):
        # str: path of JS file relative to tests root dir
        self.path = path
        # [str]: extra options to pass to the shell
        self.options = []
        # [str]: JIT flags to pass to the shell
        self.jitflags = []
        # str or None: path to reflect-stringify.js file to test instead of
        # actually running tests
        self.test_reflect_stringify = None

    @staticmethod
    def prefix_command(path):
        """Return the '-f shell.js' options needed to run a test with the
        given path."""
        if path == '':
            return ['-f', 'shell.js']
        parent = os.path.split(path)[0]
        # Load every shell.js from the tests root down to this directory.
        return RefTest.prefix_command(parent) \
            + ['-f', os.path.join(path, 'shell.js')]

    def get_command(self, prefix):
        """Build the full argv for running this test under *prefix*."""
        directory = os.path.split(self.path)[0]
        cmd = list(prefix)
        cmd += self.jitflags
        cmd += self.options
        cmd += RefTest.prefix_command(directory)
        if self.test_reflect_stringify is None:
            cmd += ["-f", self.path]
        else:
            cmd += [self.test_reflect_stringify, "--check", self.path]
        return cmd


class RefTestCase(RefTest):
    """A test case consisting of a test and an expected result."""

    def __init__(self, path):
        RefTest.__init__(self, path)
        self.enable = True   # bool: True => run test, False => don't run
        self.expect = True   # bool: expected result, True => pass
        self.random = False  # bool: True => ignore output as 'random'
        self.slow = False    # bool: True => test may run slowly

        # The terms parsed to produce the above properties.
        self.terms = None
        # The tag between |...| in the test header.
        self.tag = None
        # Anything occurring after -- in the test header.
        self.comment = None

    def __str__(self):
        """Summarize the test: its path plus any non-default attributes."""
        parts = [self.path]
        if not self.enable:
            parts.append('skip')
        if not self.expect:
            parts.append('fails')
        if self.random:
            parts.append('random')
        if self.slow:
            parts.append('slow')
        if '-d' in self.options:
            parts.append('debugMode')
        return ', '.join(parts)

    @staticmethod
    def build_js_cmd_prefix(js_path, js_args, debugger_prefix):
        """Assemble the command prefix: [debugger...] js_path [js_args...]."""
        prefix = []
        if debugger_prefix:
            prefix += debugger_prefix
        prefix.append(js_path)
        if js_args:
            prefix += js_args
        return prefix

    def __cmp__(self, other):
        # Order test cases lexicographically by path (Python 2 comparison
        # protocol).
        if self.path == other.path:
            return 0
        return -1 if self.path < other.path else 1

    def __hash__(self):
        # Hash matches the path-based ordering above.
        return self.path.__hash__()