summaryrefslogtreecommitdiffstats
path: root/layout/tools/reftest
diff options
context:
space:
mode:
Diffstat (limited to 'layout/tools/reftest')
-rw-r--r--layout/tools/reftest/Makefile.in18
-rw-r--r--layout/tools/reftest/README.txt617
-rw-r--r--layout/tools/reftest/b2g_start_script.js57
-rw-r--r--layout/tools/reftest/bootstrap.js83
-rwxr-xr-xlayout/tools/reftest/clean-reftest-output.pl38
-rw-r--r--layout/tools/reftest/gaia_lock_screen.js60
-rw-r--r--layout/tools/reftest/install.rdf22
-rw-r--r--layout/tools/reftest/jar.mn7
-rw-r--r--layout/tools/reftest/mach_commands.py393
-rw-r--r--layout/tools/reftest/mach_test_package_commands.py92
-rw-r--r--layout/tools/reftest/moz.build33
-rw-r--r--layout/tools/reftest/output.py135
-rw-r--r--layout/tools/reftest/reftest-analyzer-structured.xhtml649
-rw-r--r--layout/tools/reftest/reftest-analyzer.xhtml647
-rw-r--r--layout/tools/reftest/reftest-content.js1174
-rw-r--r--layout/tools/reftest/reftest-preferences.js127
-rwxr-xr-xlayout/tools/reftest/reftest-to-html.pl118
-rw-r--r--layout/tools/reftest/reftest.jsm2112
-rw-r--r--layout/tools/reftest/reftest.xul14
-rw-r--r--layout/tools/reftest/reftest/__init__.py132
-rw-r--r--layout/tools/reftest/reftestcommandline.py749
-rw-r--r--layout/tools/reftest/remotereftest.py412
-rw-r--r--layout/tools/reftest/runreftest.py747
-rw-r--r--layout/tools/reftest/runreftestb2g.py425
-rw-r--r--layout/tools/reftest/runreftestmulet.py203
25 files changed, 9064 insertions, 0 deletions
diff --git a/layout/tools/reftest/Makefile.in b/layout/tools/reftest/Makefile.in
new file mode 100644
index 000000000..7ff59cd8b
--- /dev/null
+++ b/layout/tools/reftest/Makefile.in
@@ -0,0 +1,18 @@
+# vim: set shiftwidth=8 tabstop=8 autoindent noexpandtab copyindent:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+_DEST_DIR = $(DEPTH)/_tests/reftest
+
+include $(topsrcdir)/config/rules.mk
+
+# We're installing to _tests/reftest
+TARGET_DEPTH = ../..
+include $(topsrcdir)/build/automation-build.mk
+
+# copy harness and the reftest extension bits to $(_DEST_DIR)
+# This needs to happen after jar.mn handling from rules.mk included above.
+# The order of the :: rules ensures that.
+libs::
+ (cd $(DIST)/xpi-stage && tar $(TAR_CREATE_FLAGS) - reftest) | (cd $(_DEST_DIR) && tar -xf -)
diff --git a/layout/tools/reftest/README.txt b/layout/tools/reftest/README.txt
new file mode 100644
index 000000000..c58cf9cdc
--- /dev/null
+++ b/layout/tools/reftest/README.txt
@@ -0,0 +1,617 @@
+Layout Engine Visual Tests (reftest)
+L. David Baron <dbaron@dbaron.org>, Mozilla Corporation
+July 19, 2006
+
+This code is designed to run tests of Mozilla's layout engine. These
+tests consist of an HTML (or other format) file along with a reference
+in the same format. The tests are run based on a manifest file, and for
+each test, PASS or FAIL is reported, and UNEXPECTED is reported if the
+result (PASS or FAIL) was not the expected result noted in the manifest.
+
+Images of the display of both tests are captured, and most test types
+involve comparing these images (e.g., test types == or !=) to determine
+whether the test passed. The captures of the tests are taken in a
+viewport that is 800 pixels wide and 1000 pixels tall, so any content
+outside that area will be ignored (except for any scrollbars that are
+displayed). Ideally, however, tests should be written so that they fit
+within 600x600, since we may in the future want to switch to 600x600 to
+match http://lists.w3.org/Archives/Public/www-style/2012Sep/0562.html .
+
+Why this way?
+=============
+
+Writing HTML tests where the reference rendering is also in HTML is
+harder than simply writing bits of HTML that can be regression-tested by
+comparing the rendering of an older build to that of a newer build
+(perhaps using stored reference images from the older build). However,
+comparing across time has major disadvantages:
+
+ * Comparisons across time either require two runs for every test, or
+ they require stored reference images appropriate for the platform and
+ configuration (often limiting testing to a very specific
+ configuration).
+
+ * Comparisons across time may fail due to expected changes, for
+ example, changes in the default style sheet for HTML, changes in the
+ appearance of form controls, or changes in default preferences like
+ default font size or default colors.
+
+Using tests for which the pass criteria were explicitly chosen allows
+running tests at any time to see whether they still pass.
+
+Manifest Format
+===============
+
+The test manifest format is a plain text file. A line starting with a
+"#" is a comment. Lines may be commented using whitespace followed by
+a "#" and the comment. Each non-blank line (after removal of comments)
+must be one of the following:
+
+1. Inclusion of another manifest
+
+ <failure-type>* include <relative_path>
+
+ <failure-type> is the same as listed below for a test item. As for
+ test items, multiple failure types listed on the same line are
+ combined by using the last matching failure type listed. However,
+ the failure type on a manifest is combined with the failure type on
+ the test (or on a nested manifest) with the rule that the last in the
+ following list wins: fails, random, skip. (In other words, skip
+ always wins, and random beats fails.)
+
+2. A test item
+
+ [ <failure-type> | <preference> ]* [<http>] <type> <url> <url_ref>
+
+ where
+
+ a. <failure-type> (optional) is one of the following:
+
+ fails The test passes if the images of the two renderings DO NOT
+ meet the conditions specified in the <type>.
+
+ fails-if(condition) If the condition is met, the test passes if the
+ images of the two renderings DO NOT meet the
+ conditions of <type>. If the condition is not met,
+ the test passes if the conditions of <type> are met.
+
+ needs-focus The test fails or times out if the reftest window is not
+ focused.
+
+ random The results of the test are random and therefore not to be
+ considered in the output.
+
+ random-if(condition) The results of the test are random if a given
+ condition is met.
+
+ silentfail This test may fail silently, and if that happens it should
+ count as if the test passed. This is useful for cases where
+ silent failure is the intended behavior (for example, in
+ an out of memory situation in JavaScript, we stop running
+ the script silently and immediately, in hopes of reclaiming
+ enough memory to keep the browser functioning).
+
+ silentfail-if(condition) This test may fail silently if the condition
+ is met.
+
+ skip This test should not be run. This is useful when a test fails in a
+ catastrophic way, such as crashing or hanging the browser. Using
+ 'skip' is preferred to simply commenting out the test because we
+ want to report the test failure at the end of the test run.
+
+ skip-if(condition) If the condition is met, the test is not run. This is
+ useful if, for example, the test crashes only on a
+ particular platform (i.e. it allows us to get test
+ coverage on the other platforms).
+
+ slow The test may take a long time to run, so run it if slow tests are
+ either enabled or not disabled (test manifest interpreters may
+ choose whether or not to run such tests by default).
+
+ slow-if(condition) If the condition is met, the test is treated as if
+ 'slow' had been specified. This is useful for tests
+ which are slow only on particular platforms (e.g. a
+ test which exercised out-of-memory behavior might be
+ fast on a 32-bit system but inordinately slow on a
+ 64-bit system).
+
+ fuzzy(maxDiff, diffCount)
+ This allows a test to pass if the pixel value differences are <=
+ maxDiff and the total number of different pixels is <= diffCount.
+ It can also be used with '!=' to ensure that the difference is
+ greater than maxDiff.
+
+ fuzzy-if(condition, maxDiff, diffCount)
+ If the condition is met, the test is treated as if 'fuzzy' had been
+ specified. This is useful if there are differences on particular
+ platforms.
+
+ require-or(cond1&&cond2&&...,fallback)
+ Require some particular setup be performed or environmental
+ condition(s) made true (eg setting debug mode) before the test
+ is run. If any condition is unknown, unimplemented, or fails,
+ revert to the fallback failure-type.
+ Example: require-or(debugMode,skip)
+
+ asserts(count)
+ Loading the test and reference is known to assert exactly
+ count times.
+ NOTE: An asserts() notation with a non-zero count or maxCount
+ suppresses use of a cached canvas for the test with the
+ annotation. However, if later occurrences of the same test
+ are not annotated, they will use the cached canvas
+ (potentially from the load that asserted). This allows
+ repeated use of the same test or reference to be annotated
+ correctly (which may be particularly useful when the uses are
+ in different subdirectories that can be tested independently),
+ but does not force them to be, nor does it force suppression
+ of caching for a common reference when it is the test that
+ asserts.
+
+ asserts(minCount-maxCount)
+ Loading the test and reference is known to assert between
+ minCount and maxCount times, inclusive.
+ NOTE: See above regarding canvas caching.
+
+ asserts-if(condition,count)
+ asserts-if(condition,minCount-maxCount)
+ Same as above, but only if condition is true.
+
+ Conditions are JavaScript expressions *without spaces* in them.
+ They are evaluated in a sandbox in which a limited set of
+ variables are defined. See the BuildConditionSandbox function in
+ layout/tools/reftest/reftest.jsm for details.
+
+ Examples of using conditions:
+ fails-if(winWidget) == test reference
+ asserts-if(cocoaWidget,2) load crashtest
+
+ b. <preference> (optional) is a string of the form
+
+ pref(<name>,<value>)
+ test-pref(<name>,<value>)
+ ref-pref(<name>,<value>)
+
+ where <name> is the name of a preference setting, as seen in
+ about:config, and <value> is the value to which this preference should
+ be set. <value> may be a boolean (true/false), an integer, or a
+ quoted string *without spaces*, according to the type of the preference.
+
+ The preference will be set to the specified value prior to
+ rendering the test and/or reference canvases (pref() applies to
+ both, test-pref() only to the test, and ref-pref() only to the
+ reference), and will be restored afterwards so that following
+ tests are not affected. Note that this feature is only useful for
+ "live" preferences that take effect immediately, without requiring
+ a browser restart.
+
+ c. <http>, if present, is one of the strings (sans quotes) "HTTP" or
+ "HTTP(..)" or "HTTP(../..)" or "HTTP(../../..)", etc., indicating that
+ the test should be run over an HTTP server because it requires certain
+ HTTP headers or a particular HTTP status. (Don't use this if your test
+ doesn't require this functionality, because it unnecessarily slows down
+ the test.)
+
+ With "HTTP", HTTP tests have the restriction that any resource an HTTP
+ test accesses must be accessed using a relative URL, and the test and
+ the resource must be within the directory containing the reftest
+ manifest that describes the test (or within a descendant directory).
+ The variants "HTTP(..)", etc., can be used to relax this restriction by
+ allowing resources in the parent directory, etc.
+
+ To modify the HTTP status or headers of a resource named FOO, create a
+ sibling file named FOO^headers^ with the following contents:
+
+ [<http-status>]
+ <http-header>*
+
+ <http-status> A line of the form "HTTP ###[ <description>]", where
+ ### indicates the desired HTTP status and <description>
+ indicates a desired HTTP status description, if any.
+ If this line is omitted, the default is "HTTP 200 OK".
+ <http-header> A line in standard HTTP header line format, i.e.
+ "Field-Name: field-value". You may not repeat the use
+ of a Field-Name and must coalesce such headers together,
+ and each header must be specified on a single line, but
+ otherwise the format exactly matches that from HTTP
+ itself.
+
+ HTTP tests may also incorporate SJS files. SJS files provide similar
+ functionality to CGI scripts, in that the response they produce can be
+ dependent on properties of the incoming request. Currently these
+ properties are restricted to method type and headers, but eventually
+ it should be possible to examine data in the body of the request as
+ well when computing the generated response. An SJS file is a JavaScript
+ file with a .sjs extension which defines a global |handleRequest|
+ function (called every time that file is loaded during reftests) in this
+ format:
+
+ function handleRequest(request, response)
+ {
+ response.setStatusLine(request.httpVersion, 200, "OK");
+
+ // You *probably* want this, or else you'll get bitten if you run
+ // reftest multiple times with the same profile.
+ response.setHeader("Cache-Control", "no-cache");
+
+ response.write("any ASCII data you want");
+
+ var outputStream = response.bodyOutputStream;
+ // ...anything else you want to do, synchronously...
+ }
+
+ For more details on exactly which functions and properties are available
+ on request/response in handleRequest, see the nsIHttpRe(quest|sponse)
+ definitions in <netwerk/test/httpserver/nsIHttpServer.idl>.
+
+ d. <type> is one of the following:
+
+ == The test passes if the images of the two renderings are the
+ SAME.
+ != The test passes if the images of the two renderings are
+ DIFFERENT.
+ load The test passes unconditionally if the page loads. url_ref
+ must be omitted, and the test cannot be marked as fails or
+ random. (Used to test for crashes, hangs, assertions, and
+ leaks.)
+ script The loaded page records the test's pass or failure status
+ in a JavaScript data structure accessible through the following
+ API.
+
+ getTestCases() returns an array of test result objects
+ representing the results of the tests performed by the page.
+
+ Each test result object has two methods:
+
+ testPassed() returns true if the test result object passed,
+ otherwise it returns false.
+
+ testDescription() returns a string describing the test
+ result.
+
+ url_ref must be omitted. The test may be marked as fails or
+ random. (Used to test the JavaScript Engine.)
+
+ e. <url> is either a relative file path or an absolute URL for the
+ test page
+
+ f. <url_ref> is either a relative file path or an absolute URL for
+ the reference page
+
+ The only difference between <url> and <url_ref> is that results of
+ the test are reported using <url> only.
+
+3. Specification of a url prefix
+
+ url-prefix <string>
+
+ <string> will be prepended to relative <url> and <url_ref> for all following
+ test items in the manifest.
+
+ <string> will not be prepended to the relative path when including another
+ manifest, e.g. include <relative_path>.
+
+ <string> will not be prepended to any <url> or <url_ref> matching the pattern
+ /^\w+:/. This will prevent the prefix from being applied to any absolute url
+ containing a protocol such as data:, about:, or http:.
+
+ While the typical use of url-prefix is expected to be as the first line of
+ a manifest, it is legal to use it anywhere in a manifest. Subsequent uses
+ of url-prefix overwrite any existing values.
+
+4. Specification of default preferences
+
+ default-preferences <preference>*
+
+ where <preference> is defined above.
+
+ The <preference> settings will be used for all following test items in the
+ manifest.
+
+ If a test item includes its own preference settings, then they will override
+ any settings for preferences of the same names that are set using
+ default-preferences, just as later items within a line override earlier ones.
+
+ A default-preferences line with no <preference> settings following it will
+ reset the set of default preferences to be empty.
+
+ As with url-prefix, default-preferences will often be used at the start of a
+ manifest file so that it applies to all test items, but it is legal for
+ default-preferences to appear anywhere in the manifest. A subsequent
+ default-preferences will reset any previous default preference values and
+ overwrite them with the specified <preference> values.
+
+This test manifest format could be used by other harnesses, such as ones
+that do not depend on XUL, or even ones testing other layout engines.
+
+Running Tests
+=============
+
+(If you're not using a DEBUG build, first set browser.dom.window.dump.enabled
+to true (in about:config, in the profile you'll be using to run the tests).
+Create the option as a new boolean if it doesn't exist already. If you skip
+this step you won't get any output in the terminal.)
+
+At some point in the future there will hopefully be a cleaner way to do
+this. For now, go to your object directory, and run (perhaps using
+MOZ_NO_REMOTE=1 or the -profile <directory> option)
+
+./firefox -reftest /path/to/srcdir/mozilla/layout/reftests/reftest.list > reftest.out
+
+and then search/grep reftest.out for "UNEXPECTED".
+
+There are two scripts provided to convert the reftest.out to HTML.
+clean-reftest-output.pl converts reftest.out into simple HTML, stripping
+lines from the log that aren't relevant. reftest-to-html.pl converts
+the output into html that makes it easier to visually check for
+failures.
+
+Testable Areas
+==============
+
+This framework is capable of testing many areas of the layout engine.
+It is particularly well-suited to testing dynamic change handling (by
+comparison to the static end-result as a reference) and incremental
+layout (comparison of a script-interrupted layout to one that was not).
+However, it is also possible to write tests for many other things that
+can be described in terms of equivalence, for example:
+
+ * CSS cascading could be tested by comparing the result of a
+ complicated set of style rules that makes a word green to <span
+ style="color:green">word</span>.
+
+ * <canvas> compositing operators could be tested by comparing the
+ result of drawing using canvas to a block-level element with the
+ desired color as a CSS background-color.
+
+ * CSS counters could be tested by comparing the text output by counters
+ with a page containing the text written out
+
+ * complex margin collapsing could be tested by comparing the complex
+ case to a case where the margin is written out, or where the margin
+ space is created by an element with 'height' and transparent
+ background
+
+When it is not possible to test by equivalence, it may be possible to
+test by non-equivalence. For example, testing justification in cases
+with more than two words, or more than three different words, is
+difficult. However, it is simple to test that justified text is at
+least displayed differently from left-, center-, or right-aligned text.
+
+Writing Tests
+=============
+
+When writing tests for this framework, it is important for the test to
+depend only on behaviors that are known to be correct and permanent.
+For example, tests should not depend on default font sizes, default
+margins of the body element, the default style sheet used for HTML, the
+default appearance of form controls, or anything else that can be
+avoided.
+
+In general, the best way to achieve this is to make the test and the
+reference identical in as many aspects as possible. For example:
+
+ Good test markup:
+ <div style="color:green"><table><tr><td><span>green
+ </span></td></tr></table></div>
+
+ Good reference markup:
+ <div><table><tr><td><span style="color:green">green
+ </span></td></tr></table></div>
+
+ BAD reference markup:
+ <!-- 3px matches the default cellspacing and cellpadding -->
+ <div style="color:green; padding: 3px">green
+ </div>
+
+ BAD test markup:
+ <!-- span doesn't change the positioning, so skip it -->
+ <div style="color:green"><table><tr><td>green
+ </td></tr></table></div>
+
+Asynchronous Tests: class="reftest-wait"
+========================================
+
+Normally reftest takes a snapshot of the given markup's rendering right
+after the load event fires for content. If your test needs to postpone
+the moment the snapshot is taken, it should make sure a class
+'reftest-wait' is on the root element by the moment the load event
+fires. The easiest way to do this is to put it in the markup, e.g.:
+ <html class="reftest-wait">
+
+When your test is ready, you should remove this class from the root
+element, for example using this code:
+ document.documentElement.className = "";
+
+
+Note that in layout tests it is often enough to trigger layout using
+ document.body.offsetWidth // HTML example
+
+When possible, you should use this technique instead of making your
+test async.
+
+Invalidation Tests: MozReftestInvalidate Event
+==============================================
+
+When a test (or reference) uses reftest-wait, reftest tracks invalidation
+via MozAfterPaint and updates the test image in the same way that
+a regular window would be repainted. Therefore it is possible to test
+invalidation-related bugs by setting up initial content and then
+dynamically modifying it before removing reftest-wait. However, it is
+important to get the timing of these dynamic modifications right so that
+the test doesn't accidentally pass because a full repaint of the window
+was already pending. To help with this, reftest fires one MozReftestInvalidate
+event at the document root element for a reftest-wait test when it is safe to
+make changes that should test invalidation. The event bubbles up to the
+document and window so you can set listeners there too. For example,
+
+function doTest() {
+ document.body.style.border = "";
+ document.documentElement.removeAttribute('class');
+}
+document.addEventListener("MozReftestInvalidate", doTest, false);
+
+Painting Tests: class="reftest-no-paint"
+========================================
+
+If an element shouldn't be painted, set the class "reftest-no-paint" on it
+when doing an invalidation test. Causing a repaint in your
+MozReftestInvalidate handler (for example, by changing the body's background
+colour) will accurately test whether the element is painted.
+
+Opaque Layer Tests: class="reftest-opaque-layer"
+================================================
+
+If an element should be assigned to a PaintedLayer that's opaque, set the class
+"reftest-opaque-layer" on it. This checks whether the layer is opaque during
+the last paint of the test, and it works whether your test is an invalidation
+test or not. In order to pass the test, the element has to have a primary
+frame, and that frame's display items must all be assigned to a single painted
+layer and no other layers, so it can't be used on elements that create stacking
+contexts (active or inactive).
+
+Layerization Tests: reftest-assigned-layer="layer-name"
+=======================================================
+
+If two elements should be assigned to the same PaintedLayer, choose any string
+value as the layer name and set the attribute reftest-assigned-layer="yourname"
+on both elements. Reftest will check whether all elements with the same
+reftest-assigned-layer value share the same layer. It will also test whether
+elements with different reftest-assigned-layer values are assigned to different
+layers.
+The same restrictions as with class="reftest-opaque-layer" apply: All elements
+must have a primary frame, and that frame's display items must all be assigned
+to the same PaintedLayer and no other layers. If these requirements are not
+met, the test will fail.
+
+Snapshot The Whole Window: class="reftest-snapshot-all"
+=======================================================
+
+In a reftest-wait test, to disable testing of invalidation and force the final
+snapshot to be taken of the whole window, set the "reftest-snapshot-all"
+class on the root element.
+
+Avoid triggering flushes: class="reftest-no-flush"
+==================================================
+
+The reftest harness normally triggers flushes by calling
+getBoundingClientRect on the root element. If the root element of the
+test has class="reftest-no-flush", it doesn't do this.
+
+This is useful for testing animations on the compositor thread, since
+the flushing will cause a main thread style update.
+
+Zoom Tests: reftest-zoom="<float>"
+==================================
+
+When the root element of a test has a "reftest-zoom" attribute, that zoom
+factor is applied when rendering the test. This corresponds to the desktop
+"full zoom" style zoom. The reftest document will be 800 device pixels wide by 1000
+device pixels high. The reftest harness assumes that the CSS pixel dimensions
+are 800/zoom and 1000/zoom. For best results therefore, choose zoom factors
+that do not require rounding when we calculate the number of appunits per
+device pixel; i.e. the zoom factor should divide 60, so 60/zoom is an integer.
+
+Setting Scrollport Size: reftest-scrollport-w/h="<int>"
+=======================================================
+
+If either of the "reftest-scrollport-w" and "reftest-scrollport-h" attributes on
+the root element are non-zero, sets the scroll-position-clamping scroll-port
+size to the given size in CSS pixels. This does not affect the size of the
+snapshot that is taken.
+
+Setting Async Scroll Mode: reftest-async-scroll attribute
+=========================================================
+
+If the "reftest-async-scroll" attribute is set on the root element, we try to
+enable async scrolling and zooming for the document. This is unsupported in many
+configurations.
+
+Setting Displayport Dimensions: reftest-displayport-x/y/w/h="<int>"
+===================================================================
+
+If any of the "reftest-displayport-x", "reftest-displayport-y",
+"reftest-displayport-w" and "reftest-displayport-h" attributes on the root
+element are nonzero, sets the displayport dimensions to the given bounds in
+CSS pixels. This does not affect the size of the snapshot that is taken.
+
+When the "reftest-async-scroll" attribute is set on the root element, *all*
+elements in the document are checked for "reftest-displayport-x/y/w/h" and have
+displayports set on them when those attributes are present.
+
+Testing Async Scrolling: reftest-async-scroll-x/y="<int>"
+=========================================================
+
+When the "reftest-async-scroll" attribute is set on the root element, for any
+element where either the "reftest-async-scroll-x" or "reftest-async-scroll-y"
+attributes are nonzero, at the end of the test take the snapshot with the given
+offset (in CSS pixels) added to the async scroll offset.
+
+Testing Async Zooming: reftest-async-zoom="<float>"
+=========================================================
+
+When the "reftest-async-zoom" attribute is present on the root element then at
+the end of the test take the snapshot with the given async zoom on top of any
+existing zoom. Content is not re-rendered at the new zoom level. This
+corresponds to the mobile style "pinch zoom" style of zoom. This is unsupported
+in many configurations, and any tests using this will probably want to have
+pref(apz.allow_zooming,true) on them.
+
+Printing Tests: class="reftest-print"
+=====================================
+
+Now that the patch for bug 374050 has landed
+(https://bugzilla.mozilla.org/show_bug.cgi?id=374050), it is possible to
+create reftests that run in a paginated context.
+
+The page size used is 5in wide and 3in tall (with the default half-inch
+margins). This is to allow tests to have less text and to make the
+entire test fit on the screen.
+
+There is a layout/reftests/printing directory for printing reftests; however,
+there is nothing special about this directory. You can put printing reftests
+anywhere that is appropriate.
+
+The suggested first lines for any printing test are
+<!DOCTYPE html><html class="reftest-print">
+<style>html{font-size:12pt}</style>
+
+The reftest-print class on the root element triggers the reftest to
+switch into page mode. Fixing the font size is suggested, although not
+required, because the pages are a fixed size in inches. The switch to page mode
+happens on load if the reftest-wait class is not present; otherwise it happens
+immediately after firing the MozReftestInvalidate event.
+
+The underlying layout support for this mode isn't really complete; it
+doesn't use exactly the same codepath as real print preview/print. In
+particular, scripting and frames are likely to cause problems; it is untested,
+though. That said, it should be sufficient for testing layout issues related
+to pagination.
+
+Plugin and IPC Process Crash Tests: class="reftest-expect-process-crash"
+========================================================================
+
+If you are running a test that causes an out-of-process plugin or IPC process
+under Electrolysis to crash as part of a reftest, this will cause process
+crash minidump files to be left in the profile directory. The test
+infrastructure that runs the reftests will notice these minidump files and
+dump out information from them, and these additional error messages in the logs
+can end up erroneously being associated with other errors from the reftest run.
+They are also confusing, since the appearance of "PROCESS-CRASH" messages in
+the test run output can seem like a real problem, when in fact it is the
+expected behavior.
+
+To indicate to the reftest framework that a test is expecting a plugin or
+IPC process crash, have the test include "reftest-expect-process-crash" as
+one of the root element's classes by the time the test has finished. This will
+cause any minidump files that are generated while running the test to be removed
+and they won't cause any error messages in the test run output.
+
+Skip Forcing A Content Process Layer-Tree Update: reftest-no-sync-layers attribute
+==================================================================================
+
+Normally when a multi-process reftest test ends, we force the content process
+to push a layer-tree update to the compositor before taking the snapshot.
+Setting the "reftest-no-sync-layers" attribute on the root element skips this
+step, enabling testing that layer-tree updates are being correctly generated.
+However the test must manually wait for a MozAfterPaint event before ending.
diff --git a/layout/tools/reftest/b2g_start_script.js b/layout/tools/reftest/b2g_start_script.js
new file mode 100644
index 000000000..7c368412f
--- /dev/null
+++ b/layout/tools/reftest/b2g_start_script.js
@@ -0,0 +1,57 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const { utils: Cu, classes: Cc, interfaces: Ci } = Components;
+
+function setPermissions() {
+ if (__webDriverArguments.length < 2) {
+ return;
+ }
+
+ let serverAddr = __webDriverArguments[0];
+ let serverPort = __webDriverArguments[1];
+ let perms = Cc["@mozilla.org/permissionmanager;1"]
+ .getService(Ci.nsIPermissionManager);
+ let ioService = Cc["@mozilla.org/network/io-service;1"]
+ .getService(Ci.nsIIOService);
+ let uri = ioService.newURI("http://" + serverAddr + ":" + serverPort, null, null);
+ perms.add(uri, "allowXULXBL", Ci.nsIPermissionManager.ALLOW_ACTION);
+}
+
+var cm = Cc["@mozilla.org/categorymanager;1"]
+ .getService(Ci.nsICategoryManager);
+
+// Disable update timers that cause b2g failures.
+if (cm) {
+ cm.deleteCategoryEntry("update-timer", "nsUpdateService", false);
+}
+
+// Load into any existing windows
+var wm = Cc["@mozilla.org/appshell/window-mediator;1"]
+ .getService(Ci.nsIWindowMediator);
+var win = wm.getMostRecentWindow('');
+
+setPermissions();
+
+// Loading this into the global namespace causes intermittent failures.
+// See bug 882888 for more details.
+var reftest = {};
+Cu.import("chrome://reftest/content/reftest.jsm", reftest);
+
+// Prevent display off during testing.
+navigator.mozPower.screenEnabled = true;
+var settingLock = navigator.mozSettings.createLock();
+var settingResult = settingLock.set({
+ 'screen.timeout': 0
+});
+settingResult.onsuccess = function () {
+ dump("Set screen.time to 0\n");
+ // Start the reftests
+ reftest.OnRefTestLoad(win);
+}
+settingResult.onerror = function () {
+ dump("Change screen.time failed\n");
+ // Start the reftests
+ reftest.OnRefTestLoad(win);
+}
diff --git a/layout/tools/reftest/bootstrap.js b/layout/tools/reftest/bootstrap.js
new file mode 100644
index 000000000..c081335c2
--- /dev/null
+++ b/layout/tools/reftest/bootstrap.js
@@ -0,0 +1,83 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const { utils: Cu, interfaces: Ci, classes: Cc, manager: Cm } = Components;
+
+Cu.import("resource://gre/modules/FileUtils.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+
+// Waits for the first window on Android, then loads the reftest harness
+// into the most recent "navigator:browser" window once it has loaded and
+// painted (pageshow). The listener removes itself after its first hit.
+var WindowListener = {
+  onOpenWindow: function(win) {
+    Services.wm.removeListener(WindowListener);
+
+    win = win.QueryInterface(Ci.nsIInterfaceRequestor).getInterface(Ci.nsIDOMWindow);
+    win.addEventListener("load", function listener() {
+      win.removeEventListener("load", listener, false);
+
+      // Load into any existing windows.
+      let windows = Services.wm.getEnumerator("navigator:browser");
+      while (windows.hasMoreElements()) {
+        // Only the first browser window is used; break after one.
+        win = windows.getNext().QueryInterface(Ci.nsIDOMWindow);
+        break;
+      }
+
+      Cu.import("chrome://reftest/content/reftest.jsm");
+      win.addEventListener("pageshow", function listener() {
+        win.removeEventListener("pageshow", listener);
+        // Add setTimeout here because windows.innerWidth/Height are not set yet.
+        win.setTimeout(function() {OnRefTestLoad(win);}, 0);
+      });
+    }, false);
+  }
+};
+
+// Bootstrapped-extension entry point: launch the reftest harness in a
+// fresh chrome window (desktop), or arm WindowListener (Android).
+function startup(data, reason) {
+  // b2g is bootstrapped by b2g_start_script.js
+  if (Services.appinfo.widgetToolkit == "gonk") {
+    return;
+  }
+
+  if (Services.appinfo.OS == "Android") {
+    Cm.addBootstrappedManifestLocation(data.installPath);
+    Services.wm.addListener(WindowListener);
+    return;
+  }
+
+  let orig = Services.wm.getMostRecentWindow("navigator:browser");
+
+  // Force the network layer online so tests never run in offline mode.
+  let ios = Cc["@mozilla.org/network/io-service;1"]
+            .getService(Ci.nsIIOService2);
+  ios.manageOfflineStatus = false;
+  ios.offline = false;
+
+  let wwatch = Cc["@mozilla.org/embedcomp/window-watcher;1"]
+               .getService(Ci.nsIWindowWatcher);
+  // Open a small dummy window first so closing the original browser
+  // window does not exit the application before reftest.xul opens.
+  let dummy = wwatch.openWindow(null, "about:blank", "dummy",
+                                "chrome,dialog=no,left=800,height=200,width=200,all",null);
+  dummy.onload = function() {
+    // Close pre-existing window
+    orig.close();
+
+    dummy.focus();
+    wwatch.openWindow(null, "chrome://reftest/content/reftest.xul", "_blank",
+                      "chrome,dialog=no,all", {});
+  };
+}
+
+function shutdown(data, reason) {
+ if (Services.appinfo.widgetToolkit == "gonk") {
+ return;
+ }
+
+ if (Services.appinfo.OS == "Android") {
+ Services.wm.removeListener(WindowListener);
+ Cm.removedBootstrappedManifestLocation(data.installPath);
+ OnRefTestUnload();
+ Cu.unload("chrome://reftest/content/reftest.jsm");
+ }
+}
+
+
+function install(data, reason) {}
+function uninstall(data, reason) {}
diff --git a/layout/tools/reftest/clean-reftest-output.pl b/layout/tools/reftest/clean-reftest-output.pl
new file mode 100755
index 000000000..b1959281d
--- /dev/null
+++ b/layout/tools/reftest/clean-reftest-output.pl
@@ -0,0 +1,38 @@
+#!/usr/bin/perl
+# vim: set shiftwidth=4 tabstop=8 autoindent expandtab:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script is intended to be run over the standard output of a
+# reftest run. It will extract the parts of the output run relevant to
+# reftest and HTML-ize the URLs.
+
+use strict;
+
+# Emit the static HTML prologue.
+print <<EOM
+<html>
+<head>
+<title>reftest output</title>
+</head>
+<body>
+<pre>
+EOM
+;
+
+# Keep only REFTEST lines; strip the newline (and a stray CR from CRLF
+# logs), then turn test URLs and data: image URIs into hyperlinks.
+while (<>) {
+    next unless /REFTEST/;
+    chomp;
+    chop if /\r$/;
+    s,(TEST-)([^\|]*) \| ([^\|]*) \|(.*),\1\2: <a href="\3">\3</a>\4,;
+    s,(IMAGE[^:]*): (data:.*),<a href="\2">\1</a>,;
+    print;
+    print "\n";
+}
+
+# Emit the closing HTML epilogue.
+print <<EOM
+</pre>
+</body>
+</html>
+EOM
+;
diff --git a/layout/tools/reftest/gaia_lock_screen.js b/layout/tools/reftest/gaia_lock_screen.js
new file mode 100644
index 000000000..ca0953e91
--- /dev/null
+++ b/layout/tools/reftest/gaia_lock_screen.js
@@ -0,0 +1,60 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// NOTE: This code was forked from:
+// https://github.com/mozilla-b2g/gaia/blob/master/tests/atoms/gaia_lock_screen.js
+
+'use strict';
+/* globals waitFor, finish */
+/* exported GaiaLockScreen */
+
+// Helpers to lock/unlock the b2g lock screen from a marionette script.
+// Both entry points first disable the screen timeout, then wait — via the
+// marionette-provided waitFor/finish globals — until the lock-screen
+// Service reports the requested state. finish() is called with the
+// *remaining-failure* flag (e.g. still locked after an unlock request).
+var GaiaLockScreen = {
+
+  unlock: function(forcibly) {
+    let setlock = window.wrappedJSObject.SettingsListener.getSettingsLock();
+    let service = window.wrappedJSObject.Service;
+    let obj = {'screen.timeout': 0};
+    setlock.set(obj);
+
+    waitFor(
+      function() {
+        service.request('unlock', { forcibly: forcibly });
+        waitFor(
+          function() {
+            // service.locked should now be false; report leftover state.
+            finish(service.locked);
+          },
+          function() {
+            return !service.locked;
+          }
+        );
+      },
+      function() {
+        // Outer wait: the Service object must exist before requesting.
+        return !!service;
+      }
+    );
+  },
+
+  lock: function(forcibly) {
+    let service = window.wrappedJSObject.Service;
+    let setlock = window.wrappedJSObject.SettingsListener.getSettingsLock();
+    let obj = {'screen.timeout': 0};
+    setlock.set(obj);
+    waitFor(
+      function() {
+        service.request('lock', { forcibly: forcibly });
+        waitFor(
+          function() {
+            // service.locked should now be true; report leftover state.
+            finish(!service.locked);
+          },
+          function() {
+            return service.locked;
+          }
+        );
+      },
+      function() {
+        return !!service;
+      }
+    );
+  }
+};
diff --git a/layout/tools/reftest/install.rdf b/layout/tools/reftest/install.rdf
new file mode 100644
index 000000000..00275c64e
--- /dev/null
+++ b/layout/tools/reftest/install.rdf
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>reftest@mozilla.org</em:id>
+ <em:version>1.0</em:version>
+ <em:type>2</em:type>
+ <em:bootstrap>true</em:bootstrap>
+ <em:targetApplication>
+ <Description>
+ <em:id>toolkit@mozilla.org</em:id>
+#expand <em:minVersion>__MOZILLA_VERSION_U__</em:minVersion>
+#expand <em:maxVersion>__MOZILLA_VERSION_U__</em:maxVersion>
+ </Description>
+ </em:targetApplication>
+ <!-- Front End MetaData -->
+ <em:name>Reftest</em:name>
+ <em:description>Run layout comparison tests.</em:description>
+ <em:creator>L. David Baron</em:creator>
+ </Description>
+</RDF>
diff --git a/layout/tools/reftest/jar.mn b/layout/tools/reftest/jar.mn
new file mode 100644
index 000000000..ce916f06d
--- /dev/null
+++ b/layout/tools/reftest/jar.mn
@@ -0,0 +1,7 @@
+reftest.jar:
+% content reftest %content/
+* content/reftest-content.js (reftest-content.js)
+ content/httpd.jsm (../../../netwerk/test/httpserver/httpd.js)
+ content/StructuredLog.jsm (../../../testing/modules/StructuredLog.jsm)
+* content/reftest.jsm (reftest.jsm)
+ content/reftest.xul (reftest.xul)
diff --git a/layout/tools/reftest/mach_commands.py b/layout/tools/reftest/mach_commands.py
new file mode 100644
index 000000000..e790486ef
--- /dev/null
+++ b/layout/tools/reftest/mach_commands.py
@@ -0,0 +1,393 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, unicode_literals
+
+import os
+import re
+import sys
+import warnings
+import which
+from argparse import Namespace
+
+from mozbuild.base import (
+ MachCommandBase,
+ MachCommandConditions as conditions,
+ MozbuildObject,
+)
+
+from mach.decorators import (
+ CommandProvider,
+ Command,
+)
+
+import reftestcommandline
+
+ADB_NOT_FOUND = '''
+The %s command requires the adb binary to be on your path.
+
+If you have a B2G build, this can be found in
+'%s/out/host/<platform>/bin'.
+'''.lstrip()
+
+GAIA_PROFILE_NOT_FOUND = '''
+The reftest command requires a non-debug gaia profile on Mulet.
+Either pass in --profile, or set the GAIA_PROFILE environment variable.
+
+If you do not have a non-debug gaia profile, you can build one:
+ $ git clone https://github.com/mozilla-b2g/gaia
+ $ cd gaia
+ $ make
+
+The profile should be generated in a directory called 'profile'.
+'''.lstrip()
+
+GAIA_PROFILE_IS_DEBUG = '''
+The reftest command requires a non-debug gaia profile on Mulet.
+The specified profile, %s, is a debug profile.
+
+If you do not have a non-debug gaia profile, you can build one:
+ $ git clone https://github.com/mozilla-b2g/gaia
+ $ cd gaia
+ $ make
+
+The profile should be generated in a directory called 'profile'.
+'''.lstrip()
+
+MARIONETTE_DISABLED = '''
+The reftest command requires a marionette enabled build on Mulet.
+
+Add 'ENABLE_MARIONETTE=1' to your mozconfig file and re-build the application.
+Your currently active mozconfig is %s.
+'''.lstrip()
+
+parser = None
+
+
+class ReftestRunner(MozbuildObject):
+    """Easily run reftests.
+
+    This currently contains just the basics for running reftests. We may want
+    to hook up result parsing, etc.
+    """
+    def __init__(self, *args, **kwargs):
+        MozbuildObject.__init__(self, *args, **kwargs)
+
+        # TODO Bug 794506 remove once mach integrates with virtualenv.
+        build_path = os.path.join(self.topobjdir, 'build')
+        if build_path not in sys.path:
+            sys.path.append(build_path)
+
+        # Harness files are staged under <objdir>/_tests/reftest.
+        self.tests_dir = os.path.join(self.topobjdir, '_tests')
+        self.reftest_dir = os.path.join(self.tests_dir, 'reftest')
+
+    def _make_shell_string(self, s):
+        # Single-quote s for a POSIX shell, escaping embedded quotes.
+        return "'%s'" % re.sub("'", r"'\''", s)
+
+    def _setup_objdir(self, args):
+        # reftest imports will happen from the objdir
+        sys.path.insert(0, self.reftest_dir)
+
+        if not args.tests:
+            # Default to the whole suite directory when no tests are given.
+            test_subdir = {
+                "reftest": os.path.join('layout', 'reftests'),
+                "crashtest": os.path.join('layout', 'crashtest'),
+            }[args.suite]
+            args.tests = [test_subdir]
+
+        # Symlink the srcdir into the objdir so packaged-style relative
+        # test paths ("tests/...") resolve locally as well.
+        tests = os.path.join(self.reftest_dir, 'tests')
+        if not os.path.isdir(tests):
+            os.symlink(self.topsrcdir, tests)
+
+    def run_b2g_test(self, b2g_home=None, xre_path=None, **kwargs):
+        """Runs a b2g reftest.
+
+        filter is a regular expression (in JS syntax, as could be passed to the
+        RegExp constructor) to select which reftests to run from the manifest.
+
+        tests is a list of paths. It can be a relative path from the
+        top source directory, an absolute filename, or a directory containing
+        test files.
+
+        suite is the type of reftest to run. It can be one of ('reftest',
+        'crashtest').
+        """
+        args = Namespace(**kwargs)
+        if args.suite not in ('reftest', 'crashtest'):
+            raise Exception('None or unrecognized reftest suite type.')
+
+        self._setup_objdir(args)
+        import runreftestb2g
+
+        for i, path in enumerate(args.tests):
+            # Non-absolute paths are relative to the packaged directory, which
+            # has an extra tests/ at the start
+            if os.path.exists(os.path.abspath(path)):
+                path = os.path.relpath(path, os.path.join(self.topsrcdir))
+            args.tests[i] = os.path.join('tests', path)
+
+        try:
+            which.which('adb')
+        except which.WhichError:
+            # TODO Find adb automatically if it isn't on the path
+            raise Exception(ADB_NOT_FOUND % ('%s-remote' % args.suite, b2g_home))
+
+        args.b2gPath = b2g_home
+        args.logdir = self.reftest_dir
+        args.httpdPath = os.path.join(self.topsrcdir, 'netwerk', 'test', 'httpserver')
+        args.xrePath = xre_path
+        args.ignoreWindowSize = True
+
+        return runreftestb2g.run_test_harness(parser, args)
+
+    def run_mulet_test(self, **kwargs):
+        """Runs a mulet reftest."""
+        args = Namespace(**kwargs)
+        self._setup_objdir(args)
+
+        import runreftestmulet
+
+        if self.substs.get('ENABLE_MARIONETTE') != '1':
+            print(MARIONETTE_DISABLED % self.mozconfig['path'])
+            return 1
+
+        if not args.profile:
+            gaia_profile = os.environ.get('GAIA_PROFILE')
+            if not gaia_profile:
+                print(GAIA_PROFILE_NOT_FOUND)
+                return 1
+            args.profile = gaia_profile
+
+        # A debug gaia profile ships the httpd extension; reject it.
+        if os.path.isfile(os.path.join(args.profile, 'extensions',
+                                       'httpd@gaiamobile.org')):
+            print(GAIA_PROFILE_IS_DEBUG % args.profile)
+            return 1
+
+        args.app = self.get_binary_path()
+        args.mulet = True
+
+        # Prefer the -bin binary when present; fall back otherwise.
+        if not args.app.endswith('-bin'):
+            args.app = '%s-bin' % args.app
+        if not os.path.isfile(args.app):
+            args.app = args.app[:-len('-bin')]
+
+        return runreftestmulet.run_test_harness(parser, args)
+
+    def run_desktop_test(self, **kwargs):
+        """Runs a reftest, in desktop Firefox."""
+        import runreftest
+
+        args = Namespace(**kwargs)
+        if args.suite not in ('reftest', 'crashtest', 'jstestbrowser'):
+            raise Exception('None or unrecognized reftest suite type.')
+
+        default_manifest = {
+            "reftest": (self.topsrcdir, "layout", "reftests", "reftest.list"),
+            "crashtest": (self.topsrcdir, "testing", "crashtest", "crashtests.list"),
+            "jstestbrowser": (self.topobjdir, "dist", "test-stage", "jsreftest", "tests",
+                              "jstests.list")
+        }
+
+        args.extraProfileFiles.append(os.path.join(self.topobjdir, "dist", "plugins"))
+        args.symbolsPath = os.path.join(self.topobjdir, "crashreporter-symbols")
+
+        if not args.tests:
+            args.tests = [os.path.join(*default_manifest[args.suite])]
+
+        if args.suite == "jstestbrowser":
+            args.extraProfileFiles.append(os.path.join(self.topobjdir, "dist",
+                                                       "test-stage", "jsreftest",
+                                                       "tests", "user.js"))
+
+        # The harness emits legacy tbpl-format lines; keep them unstructured.
+        self.log_manager.enable_unstructured()
+        try:
+            rv = runreftest.run_test_harness(parser, args)
+        finally:
+            self.log_manager.disable_unstructured()
+
+        return rv
+
+    def run_android_test(self, **kwargs):
+        """Runs a reftest, in Firefox for Android."""
+
+        args = Namespace(**kwargs)
+        if args.suite not in ('reftest', 'crashtest', 'jstestbrowser'):
+            raise Exception('None or unrecognized reftest suite type.')
+
+        self._setup_objdir(args)
+        import remotereftest
+
+        default_manifest = {
+            "reftest": (self.topsrcdir, "layout", "reftests", "reftest.list"),
+            "crashtest": (self.topsrcdir, "testing", "crashtest", "crashtests.list"),
+            "jstestbrowser": ("jsreftest", "tests", "jstests.list")
+        }
+
+        if not args.tests:
+            args.tests = [os.path.join(*default_manifest[args.suite])]
+
+        args.extraProfileFiles.append(
+            os.path.join(self.topsrcdir, "mobile", "android", "fonts"))
+
+        # Ship every hyphenation dictionary (*.dic) with the profile.
+        hyphenation_path = os.path.join(self.topsrcdir, "intl", "locales")
+
+        for (dirpath, dirnames, filenames) in os.walk(hyphenation_path):
+            for filename in filenames:
+                if filename.endswith('.dic'):
+                    args.extraProfileFiles.append(os.path.join(dirpath, filename))
+
+        # Fill in defaults for anything the caller did not specify.
+        if not args.httpdPath:
+            args.httpdPath = os.path.join(self.tests_dir, "modules")
+        if not args.symbolsPath:
+            args.symbolsPath = os.path.join(self.topobjdir, "crashreporter-symbols")
+        if not args.xrePath:
+            args.xrePath = os.environ.get("MOZ_HOST_BIN")
+        if not args.app:
+            args.app = self.substs["ANDROID_PACKAGE_NAME"]
+        if not args.utilityPath:
+            args.utilityPath = args.xrePath
+        args.dm_trans = "adb"
+        args.ignoreWindowSize = True
+        args.printDeviceInfo = False
+
+        from mozrunner.devices.android_device import grant_runtime_permissions
+        grant_runtime_permissions(self)
+
+        # A symlink and some path manipulations are required so that test
+        # manifests can be found both locally and remotely (via a url)
+        # using the same relative path.
+        if args.suite == "jstestbrowser":
+            staged_js_dir = os.path.join(self.topobjdir, "dist", "test-stage", "jsreftest")
+            tests = os.path.join(self.reftest_dir, 'jsreftest')
+            if not os.path.isdir(tests):
+                os.symlink(staged_js_dir, tests)
+            args.extraProfileFiles.append(os.path.join(staged_js_dir, "tests", "user.js"))
+        else:
+            tests = os.path.join(self.reftest_dir, "tests")
+            if not os.path.isdir(tests):
+                os.symlink(self.topsrcdir, tests)
+            for i, path in enumerate(args.tests):
+                # Non-absolute paths are relative to the packaged directory, which
+                # has an extra tests/ at the start
+                if os.path.exists(os.path.abspath(path)):
+                    path = os.path.relpath(path, os.path.join(self.topsrcdir))
+                args.tests[i] = os.path.join('tests', path)
+
+        self.log_manager.enable_unstructured()
+        try:
+            rv = remotereftest.run_test_harness(parser, args)
+        finally:
+            self.log_manager.disable_unstructured()
+
+        return rv
+
+
+def process_test_objects(kwargs):
+    """|mach test| works by providing a test_objects argument, from
+    which the test path must be extracted and converted into a normal
+    reftest tests argument.
+
+    Mutates ``kwargs`` in place and returns None.
+    """
+    if "test_objects" in kwargs:
+        if kwargs["tests"] is None:
+            kwargs["tests"] = []
+        kwargs["tests"].extend(item["path"] for item in kwargs["test_objects"])
+        del kwargs["test_objects"]
+
+
+def get_parser():
+    """Build the argument parser matching the current build's target
+    platform, caching it in the module-global ``parser`` for the runner
+    functions to use."""
+    global parser
+    here = os.path.abspath(os.path.dirname(__file__))
+    build_obj = MozbuildObject.from_environment(cwd=here)
+    if conditions.is_android(build_obj):
+        parser = reftestcommandline.RemoteArgumentsParser()
+    elif conditions.is_mulet(build_obj):
+        parser = reftestcommandline.B2GArgumentParser()
+    else:
+        parser = reftestcommandline.DesktopArgumentsParser()
+    return parser
+
+
+@CommandProvider
+class MachCommands(MachCommandBase):
+    # Thin mach front-ends: each command sets the suite name and defers
+    # to _run_reftest, which picks the platform-appropriate runner.
+
+    @Command('reftest',
+             category='testing',
+             description='Run reftests (layout and graphics correctness).',
+             parser=get_parser)
+    def run_reftest(self, **kwargs):
+        kwargs["suite"] = "reftest"
+        return self._run_reftest(**kwargs)
+
+    @Command('jstestbrowser',
+             category='testing',
+             description='Run js/src/tests in the browser.',
+             parser=get_parser)
+    def run_jstestbrowser(self, **kwargs):
+        # Stage the js tests before running them.
+        self._mach_context.commands.dispatch("build",
+                                             self._mach_context,
+                                             what=["stage-jstests"])
+        kwargs["suite"] = "jstestbrowser"
+        return self._run_reftest(**kwargs)
+
+    @Command('crashtest',
+             category='testing',
+             description='Run crashtests (Check if crashes on a page).',
+             parser=get_parser)
+    def run_crashtest(self, **kwargs):
+        kwargs["suite"] = "crashtest"
+        return self._run_reftest(**kwargs)
+
+    def _run_reftest(self, **kwargs):
+        # Dispatch to the android / mulet / desktop runner.
+        process_test_objects(kwargs)
+        reftest = self._spawn(ReftestRunner)
+        if conditions.is_android(self):
+            from mozrunner.devices.android_device import verify_android_device
+            verify_android_device(self, install=True, xre=True)
+            return reftest.run_android_test(**kwargs)
+        elif conditions.is_mulet(self):
+            return reftest.run_mulet_test(**kwargs)
+        return reftest.run_desktop_test(**kwargs)
+
+
+# TODO For now b2g commands will only work with the emulator,
+# they should be modified to work with all devices.
+def is_emulator(cls):
+    """Emulator needs to be configured.
+
+    Mach command condition: True when the configured device is an emulator.
+    """
+    return cls.device_name.startswith('emulator')
+
+
+@CommandProvider
+class B2GCommands(MachCommandBase):
+ def __init__(self, context):
+ MachCommandBase.__init__(self, context)
+
+ for attr in ('b2g_home', 'xre_path', 'device_name'):
+ setattr(self, attr, getattr(context, attr, None))
+
+ @Command('reftest-remote', category='testing',
+ description='Run a remote reftest (b2g layout and graphics correctness, remote device).',
+ conditions=[conditions.is_b2g, is_emulator],
+ parser=get_parser)
+ def run_reftest_remote(self, **kwargs):
+ kwargs["suite"] = "reftest"
+ return self._run_reftest(**kwargs)
+
+ @Command('crashtest-remote', category='testing',
+ description='Run a remote crashtest (Check if b2g crashes on a page, remote device).',
+ conditions=[conditions.is_b2g, is_emulator],
+ parser=get_parser)
+ def run_crashtest_remote(self, test_file, **kwargs):
+ kwargs["suite"] = "crashtest"
+ return self._run_reftest(**kwargs)
+
+ def _run_reftest(self, **kwargs):
+ process_test_objects(kwargs)
+ if self.device_name:
+ if self.device_name.startswith('emulator'):
+ emulator = 'arm'
+ if 'x86' in self.device_name:
+ emulator = 'x86'
+ kwargs['emulator'] = emulator
+
+ reftest = self._spawn(ReftestRunner)
+ return reftest.run_b2g_test(self.b2g_home, self.xre_path, **kwargs)
diff --git a/layout/tools/reftest/mach_test_package_commands.py b/layout/tools/reftest/mach_test_package_commands.py
new file mode 100644
index 000000000..e3bf64cb8
--- /dev/null
+++ b/layout/tools/reftest/mach_test_package_commands.py
@@ -0,0 +1,92 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+import os
+from argparse import Namespace
+from functools import partial
+
+from mach.decorators import (
+ CommandProvider,
+ Command,
+)
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+def run_reftest(context, **kwargs):
+    """Test-package entry point: normalize arguments and dispatch to the
+    desktop or android runner based on the build's buildapp."""
+    import mozinfo
+
+    args = Namespace(**kwargs)
+    args.e10s = context.mozharness_config.get('e10s', args.e10s)
+
+    if not args.tests:
+        args.tests = [os.path.join('layout', 'reftests', 'reftest.list')]
+
+    test_root = os.path.join(context.package_root, 'reftest', 'tests')
+    normalize = partial(context.normalize_test_path, test_root)
+    # NOTE(review): under Python 3 map() is lazy, so args.tests becomes a
+    # one-shot iterator here — confirm the supported interpreter before
+    # relying on re-iteration/indexing downstream.
+    args.tests = map(normalize, args.tests)
+
+    if mozinfo.info.get('buildapp') == 'mobile/android':
+        return run_reftest_android(context, args)
+    return run_reftest_desktop(context, args)
+
+
+def run_reftest_desktop(context, args):
+    """Run reftests against the packaged desktop Firefox binary."""
+    from runreftest import run_test_harness
+
+    args.app = args.app or context.firefox_bin
+    args.extraProfileFiles.append(os.path.join(context.bin_dir, 'plugins'))
+    args.utilityPath = context.bin_dir
+
+    # ``parser`` is the module global installed by setup_argument_parser().
+    return run_test_harness(parser, args)
+
+
+def run_reftest_android(context, args):
+    """Run reftests against Firefox for Android via the remote harness."""
+    from remotereftest import run_test_harness
+
+    args.app = args.app or 'org.mozilla.fennec'
+    args.utilityPath = context.hostutils
+    args.xrePath = context.hostutils
+    args.httpdPath = context.module_dir
+    args.dm_trans = 'adb'
+    args.ignoreWindowSize = True
+    args.printDeviceInfo = False
+
+    # In automation, pull server/emulator endpoints from the mozharness
+    # config when one is present.
+    config = context.mozharness_config
+    if config:
+        args.remoteWebServer = config['remote_webserver']
+        args.httpPort = config['emulator']['http_port']
+        args.sslPort = config['emulator']['ssl_port']
+        args.adb_path = config['exes']['adb'] % {'abs_work_dir': context.mozharness_workdir}
+
+    # ``parser`` is the module global installed by setup_argument_parser().
+    return run_test_harness(parser, args)
+
+
+def setup_argument_parser():
+    """Create the platform-appropriate argument parser and publish it as
+    the module global ``parser`` used by the run_reftest_* helpers."""
+    import mozinfo
+    import reftestcommandline
+
+    global parser
+    mozinfo.find_and_update_from_json(here)
+    if mozinfo.info.get('buildapp') == 'mobile/android':
+        parser = reftestcommandline.RemoteArgumentsParser()
+    else:
+        parser = reftestcommandline.DesktopArgumentsParser()
+    return parser
+
+
+@CommandProvider
+class ReftestCommands(object):
+    """Mach command provider for the standalone test package."""
+
+    def __init__(self, context):
+        self.context = context
+
+    @Command('reftest', category='testing',
+             description='Run the reftest harness.',
+             parser=setup_argument_parser)
+    def reftest(self, **kwargs):
+        kwargs['suite'] = 'reftest'
+        return run_reftest(self.context, **kwargs)
diff --git a/layout/tools/reftest/moz.build b/layout/tools/reftest/moz.build
new file mode 100644
index 000000000..4dcf1a6f6
--- /dev/null
+++ b/layout/tools/reftest/moz.build
@@ -0,0 +1,33 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files('**'):
+    BUG_COMPONENT = ('Testing', 'Reftest')
+
+# Package the harness as the "reftest" bootstrapped extension XPI.
+XPI_NAME = 'reftest'
+USE_EXTENSION_MANIFEST = True
+JAR_MANIFESTS += ['jar.mn']
+FINAL_TARGET_PP_FILES += ['install.rdf']
+FINAL_TARGET_FILES += ['bootstrap.js']
+
+# Files staged into the test package for running reftests standalone.
+GENERATED_FILES += ['automation.py']
+TEST_HARNESS_FILES.reftest += [
+    '!automation.py',
+    '/build/mobile/b2gautomation.py',
+    '/build/mobile/remoteautomation.py',
+    '/build/pgo/server-locations.txt',
+    '/testing/mochitest/server.js',
+    'b2g_start_script.js',
+    'gaia_lock_screen.js',
+    'mach_test_package_commands.py',
+    'output.py',
+    'reftest-preferences.js',
+    'reftestcommandline.py',
+    'remotereftest.py',
+    'runreftest.py',
+    'runreftestb2g.py',
+    'runreftestmulet.py',
+]
diff --git a/layout/tools/reftest/output.py b/layout/tools/reftest/output.py
new file mode 100644
index 000000000..d7308de1f
--- /dev/null
+++ b/layout/tools/reftest/output.py
@@ -0,0 +1,135 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+import threading
+
+from mozlog.formatters import TbplFormatter
+from mozrunner.utils import get_stack_fixer_function
+
+
+class ReftestFormatter(TbplFormatter):
+ """
+ Formatter designed to preserve the legacy "tbpl" format in reftest.
+
+ This is needed for both the reftest-analyzer and mozharness log parsing.
+ We can change this format when both reftest-analyzer and mozharness have
+ been changed to read structured logs.
+ """
+
+ def __call__(self, data):
+ if 'component' in data and data['component'] == 'mozleak':
+ # Output from mozleak requires that no prefix be added
+ # so that mozharness will pick up these failures.
+ return "%s\n" % data['message']
+
+ formatted = TbplFormatter.__call__(self, data)
+ if data['action'] == 'process_output':
+ return formatted
+ return 'REFTEST %s' % formatted
+
+ def log(self, data):
+ prefix = "%s |" % data['level'].upper()
+ return "%s %s\n" % (prefix, data['message'])
+
+ def test_end(self, data):
+ extra = data.get('extra', {})
+ status = data['status']
+ test = data['test']
+
+ status_msg = "TEST-"
+ if 'expected' in data:
+ status_msg += "UNEXPECTED-%s" % status
+ else:
+ if status != "PASS":
+ status_msg += "KNOWN-"
+ status_msg += status
+ if extra.get('status_msg') == 'Random':
+ status_msg += "(EXPECTED RANDOM)"
+
+
+ output_text = "%s | %s | %s" % (status_msg, test, data.get("message", ""))
+
+ if "reftest_screenshots" in extra:
+ screenshots = extra["reftest_screenshots"]
+ if len(screenshots) == 3:
+ output_text += ("\nREFTEST IMAGE 1 (TEST): data:image/png;base64,%s\n"
+ "REFTEST IMAGE 2 (REFERENCE): data:image/png;base64,%s") % (screenshots[0]["screenshot"],
+ screenshots[2]["screenshot"])
+ elif len(screenshots) == 1:
+ output_text += "\nREFTEST IMAGE: data:image/png;base64,%(image1)s" % screenshots[0]["screenshot"]
+
+
+ output_text += "\nREFTEST TEST-END | %s" % test
+ return "%s\n" % output_text
+
+ def process_output(self, data):
+ return "%s\n" % data["data"]
+
+ def suite_end(self, data):
+ lines = []
+ summary = data['extra']['results']
+ summary['success'] = summary['Pass'] + summary['LoadOnly']
+ lines.append("Successful: %(success)s (%(Pass)s pass, %(LoadOnly)s load only)" %
+ summary)
+ summary['unexpected'] = (summary['Exception'] + summary['FailedLoad'] +
+ summary['UnexpectedFail'] + summary['UnexpectedPass'] +
+ summary['AssertionUnexpected'] +
+ summary['AssertionUnexpectedFixed'])
+ lines.append(("Unexpected: %(unexpected)s (%(UnexpectedFail)s unexpected fail, "
+ "%(UnexpectedPass)s unexpected pass, "
+ "%(AssertionUnexpected)s unexpected asserts, "
+ "%(FailedLoad)s failed load, "
+ "%(Exception)s exception)") % summary)
+ summary['known'] = (summary['KnownFail'] + summary['AssertionKnown'] +
+ summary['Random'] + summary['Skip'] + summary['Slow'])
+ lines.append(("Known problems: %(known)s (" +
+ "%(KnownFail)s known fail, " +
+ "%(AssertionKnown)s known asserts, " +
+ "%(Random)s random, " +
+ "%(Skip)s skipped, " +
+ "%(Slow)s slow)") % summary)
+ lines = ["REFTEST INFO | %s" % s for s in lines]
+ lines.append("REFTEST SUITE-END | Shutdown")
+ return "INFO | Result summary:\n{}\n".format('\n'.join(lines))
+
+
+class OutputHandler(object):
+    """Process the output of a process during a test run and translate
+    raw data logged from reftest.js to an appropriate structured log action,
+    where applicable.
+    """
+
+    def __init__(self, log, utilityPath, symbolsPath=None):
+        # Callable that rewrites native stack frames using the symbol
+        # store, or None when no fixer is available.
+        self.stack_fixer_function = get_stack_fixer_function(utilityPath, symbolsPath)
+        self.log = log
+        # needed for b2gautomation.py
+        self.suite_finished = False
+
+    def __call__(self, line):
+        # need to return processed messages to appease remoteautomation.py
+        if not line.strip():
+            return []
+
+        try:
+            data = json.loads(line)
+        except ValueError:
+            # Not JSON: forward verbatim as plain process output.
+            self.verbatim(line)
+            return [line]
+
+        if isinstance(data, dict) and 'action' in data:
+            if data['action'] == 'suite_end':
+                self.suite_finished = True
+
+            self.log.log_raw(data)
+        else:
+            # Valid JSON but not a structured-log message; log as text.
+            self.verbatim(json.dumps(data))
+
+        return [data]
+
+    def verbatim(self, line):
+        # Fix up native stacks when possible, then emit as process output.
+        if self.stack_fixer_function:
+            line = self.stack_fixer_function(line)
+        self.log.process_output(threading.current_thread().name, line)
diff --git a/layout/tools/reftest/reftest-analyzer-structured.xhtml b/layout/tools/reftest/reftest-analyzer-structured.xhtml
new file mode 100644
index 000000000..b06bd8169
--- /dev/null
+++ b/layout/tools/reftest/reftest-analyzer-structured.xhtml
@@ -0,0 +1,649 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- -*- Mode: HTML; tab-width: 2; indent-tabs-mode: nil; -*- -->
+<!-- vim: set shiftwidth=2 tabstop=2 autoindent expandtab: -->
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this
+ - file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
+<!--
+
+Features to add:
+* make the left and right parts of the viewer independently scrollable
+* make the test list filterable
+** default to only showing unexpecteds
+* add ways to highlight differences other than circling?
+* add zoom/pan to images
+* Add ability to load log via XMLHttpRequest (also triggered via URL param)
+* color the test list based on pass/fail and expected/unexpected/random/skip
+* ability to load multiple logs ?
+** rename them by clicking on the name and editing
+** turn the test list into a collapsing tree view
+** move log loading into popup from viewer UI
+
+-->
+<!DOCTYPE html>
+<html lang="en-US" xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <title>Reftest analyzer</title>
+ <style type="text/css"><![CDATA[
+
+ html, body { margin: 0; }
+ html { padding: 0; }
+ body { padding: 4px; }
+
+ #pixelarea, #itemlist, #images { position: absolute; }
+ #itemlist, #images { overflow: auto; }
+ #pixelarea { top: 0; left: 0; width: 320px; height: 84px; overflow: visible }
+ #itemlist { top: 84px; left: 0; width: 320px; bottom: 0; }
+ #images { top: 0; bottom: 0; left: 320px; right: 0; }
+
+ #leftpane { width: 320px; }
+ #images { position: fixed; top: 10px; left: 340px; }
+
+ form#imgcontrols { margin: 0; display: block; }
+
+ #itemlist > table { border-collapse: collapse; }
+ #itemlist > table > tbody > tr > td { border: 1px solid; padding: 1px; }
+ #itemlist td.activeitem { background-color: yellow; }
+
+ /*
+ #itemlist > table > tbody > tr.pass > td.url { background: lime; }
+ #itemlist > table > tbody > tr.fail > td.url { background: red; }
+ */
+
+ #magnification > svg { display: block; width: 84px; height: 84px; }
+
+ #pixelinfo { font: small sans-serif; position: absolute; width: 200px; left: 84px; }
+ #pixelinfo table { border-collapse: collapse; }
+ #pixelinfo table th { white-space: nowrap; text-align: left; padding: 0; }
+ #pixelinfo table td { font-family: monospace; padding: 0 0 0 0.25em; }
+
+ #pixelhint { display: inline; color: #88f; cursor: help; }
+ #pixelhint > * { display: none; position: absolute; margin: 8px 0 0 8px; padding: 4px; width: 400px; background: #ffa; color: black; box-shadow: 3px 3px 2px #888; z-index: 1; }
+ #pixelhint:hover { color: #000; }
+ #pixelhint:hover > * { display: block; }
+ #pixelhint p { margin: 0; }
+ #pixelhint p + p { margin-top: 1em; }
+
+ ]]></style>
+ <script type="text/javascript"><![CDATA[
+
+var XLINK_NS = "http://www.w3.org/1999/xlink";
+var SVG_NS = "http://www.w3.org/2000/svg";
+var IMAGE_NOT_AVAILABLE = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKAAAAASCAYAAADczdVTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAHy0lEQVRoge2aX2hb5xnGf2dYabROgqQkpMuKnWUJLmxHMFaa/SscteQiF5EvUgqLctEVrDJKK1+MolzkQr4IctgW+SLIheJc1BpFpswJw92FbaZsTCGTL0465AtntUekJdJ8lByVHbnnwLsLKbKdSJbiZBVjeuAYn+/P+z3fc97vfd9zbEVEhB566BK+1m0CPfx/o+eAPXQVbR3QqVapOl8FlR46h0O1Wu02iacCZfsasMKEz8vbx1JYE6fY/dXx6mEbFObPcvDVDBlznpc9G+2r8xNcvLqK2w39r4UI+fs7tFjmytgFFu718865EIebPGincI3zFz7Bcrtx97/GL0P+p+IPbSOgRwXtW3vpewqL/a/g5rgf39hit2m0hGUAHOHrrq3trmef4/lDB7Ay57n01zuPZXPX7jUunv+Yf9ktR7D/0CHca7/n3KXPsHbAuynkCWCZptgiImKLaVqP9NuW1bT9ceybpr3j+WJbYrVa3rbEatGZi2uixvWdrysilmWKae2M+5PqlktoosayLfubcrN10dAk24aynUsIxMVsadwUs+EX7dEyAlaXLqMoCj6fj5HkUqO9MD+Govjx+xXcXi+uoRAhvwuv182Z8Ws4AJUlxoZ8uNxuvF43ii/EtdXNNUuV68lR/IqC4gsxPj7KkE/BF5qmClRXrzFSt+/1ulDOjLNU6eQ4OcyPDqH4hhg5O4LicuN2K4xcvk6jjHUKJM8O1fvcKMoZkouFOq1VPp1OcuXGAvrvfsv0lWmSySTzN0sdH+jyYhK/ouB2e/G6XfjPJikBVG8SUhT8fl99nwVGfQp+vx+f4iO5VO1AtwJjfgXF58M/kqSVJP9ef0xuAI6NlwWmL41xxqeg+PyMXr72yBqW3cI4JaZHh1DcXrxeLy5liORiB7q1PiZFyeV0mQqz9TRZeUmFVUGLSjqdkgCIFp2RTCosEJOiiIihSyKWkDl9WYrFnCQCCNF0w0QmHhBQJTEzJ+nZSQmAoEYks2KIGBkJgASiM5I3LbGMnCSCCEQl38GJMvMZiag1e+nlFcmmIgKaZEwREaPGhWGZ1VfEMFZkNj4sgCSyhoihSzwSlqCGoAUlEo1IJByW+Oxyh+dZJJ+eklhiRnIrRcnrM6KCxLOmiNiipyICSGR2pTY2O1m7T2XEsNrrJmJLfjkn6amwoMbFaMEhG28eAVtzExErW3sOBCWVzkpmNiEqCOEZ2RyLTT3eJAKaMhVEUMOSXjHEtg3JTIUFkNTK9rGwbQrWm2xGb6QoWxIqEtdtEWO28aDtoi6JSFCAjUtL1AUzJA4SSW/IZ2VjjU0V0zEBJBiJSzwWk1g8IZEAAmrdidrBkoSKxB4IW08tGVNEzIxoIJM5a8v4SQ1RY5lGSy6x8xScz6QkHFBre1Zre49nH+y1KDEQLV7TcyU1LBCtHVppp9smxk2dYAMtHXA7blZWNJDZ4sZ4MxPbdHjrbc3WNuvOq4YlkYhLLBaXeKx2sLcrBUS2ScFtUbUBh3WgajvgOYgGuKjw4Rsqb1uvkssbWLbJXFQFqL/I9IEKa2WzYcqy16E2BNteB1R+cuwoRwcHGRx4nlfenWMuPclRDx3goSraqd+7Gj/Y5d76SrXLu3VKLYW1rMZbo/QpB4+9zt6fT1I0Law/LRMBaLzC7ePNuSgL7/2GpcotLr7+AZG5t9gH0Fa3zuFq1tiWG4DKs5tebV1NDDW1XYd26iWO9A8wODjAUfUN5ubm+Ch4ZFuuLRzQoVwqUCqXyN9fg3tFSuUShVIZhyr5O2vo94o42DwD/PP23fq8Bf5urLO+BoHBwxzc20c++wcmz+lAkWLFATwcf3+YDwIDhMYmuDw+wt5j5+C5ZwDYP/gSoLP6xX
5+fOIkJ47/lIP8g49/Nc3tDj59OZUiRR3uFYsAVO/eZoE1yvkyeA6gAaff+zU3SxUcp8LilQucnoFTP3hhix19/garlQqFW9eZOBti9Mqt9mubXwBw+NALeDC4cfVDzgP3i3keUN/nf4uo+hEver/DRaK84/9mY/72uoFTKVMolVn5/HPgPvlSmVKhRL2bSrlEqVyidH8N/d7t2u/lakfcKneLgM4rvxhncbXA6tI8kTffB+0NjnrAqZYplcrk83ceXdtzgB+psHD7S/pfPs7JkydQB1x8dnWS2SVje9GaxkVLl+DmNNC4NJn/S6JxH5nJyNRwrW7Qi7oMgxBMyd9molvmRKO1cExgshG6l9NTEhkOynAkLlOJoKBuhPV8ZlK0h9aNTqVbv3ltEK/VIiAQEN0yZVLbuM+aImLoEgts3VdsJrfFil1M1/ZSv9RAROaWO8n/hkyF1Q3bgeFGygvPrDRG5Wcf1IJbq9rlNrrNbra96aqlUVMSWrNnNiw5uw23T/4o4Xq7FtA29h2My3K9WtETgRZr13UxdIk+pGswkpCcsX0N2OZD9BOgWqFsgWePp20KWb0ywkDgEIa8y55Gq0O5XKHP7cGz++l/haxWylgOuD17aG7eoVpxwL27RX8b27jZ42n1qdahXKrg2bfnUW0eQ7edoD232l+/LPp2pHvNfh8eT2f8/3sO2AZLyRAvns6gqToLOgxP6Uz87HvdoNJDF9E1B6ysLrLw5yW+3PUNvv3dH/L9wX3doNFDl9E1B+yhB+j9O1YPXcZ/AAl9BWJNvZE7AAAAAElFTkSuQmCC";
+
+var gPhases = null;
+
+var gIDCache = {};
+
+var gMagPixPaths = []; // 2D array of array-of-two <path> objects used in the pixel magnifier
+var gMagWidth = 5; // number of zoomed in pixels to show horizontally
+var gMagHeight = 5; // number of zoomed in pixels to show vertically
+var gMagZoom = 16; // size of the zoomed in pixels
+var gImage1Data; // ImageData object for the reference image
+var gImage2Data; // ImageData object for the test output image
+var gFlashingPixels = []; // array of <path> objects that should be flashed due to pixel color mismatch
+var gParams;
+
+function ID(id) {
+ if (!(id in gIDCache))
+ gIDCache[id] = document.getElementById(id);
+ return gIDCache[id];
+}
+
+function hash_parameters() {
+ var result = { };
+ var params = window.location.hash.substr(1).split(/[&;]/);
+ for (var i = 0; i < params.length; i++) {
+ var parts = params[i].split("=");
+ result[parts[0]] = unescape(unescape(parts[1]));
+ }
+ return result;
+}
+
+// Entry point (body onload): caches the phase containers, builds the
+// magnifier grid, then loads a log straight away if one was supplied in the
+// URL hash (#log=... inline, or #logurl=... fetched via XMLHttpRequest).
+function load() {
+  gPhases = [ ID("entry"), ID("loading"), ID("viewer") ];
+  build_mag();
+  gParams = hash_parameters();
+  if (gParams.log) {
+    // Whole log passed inline in the fragment.
+    show_phase("loading");
+    process_log(gParams.log);
+  } else if (gParams.logurl) {
+    show_phase("loading");
+    var req = new XMLHttpRequest();
+    req.onreadystatechange = function() {
+      if (req.readyState === 4) {
+        process_log(req.responseText);
+      }
+    };
+    req.open('GET', gParams.logurl, true);
+    req.send();
+  }
+  window.addEventListener('keypress', handle_keyboard_shortcut, false);
+  // Substitute the embedded placeholder when a screenshot fails to load.
+  ID("image1").addEventListener('error', image_load_error, false);
+  ID("image2").addEventListener('error', image_load_error, false);
+}
+
+function image_load_error(e) {
+ e.target.setAttributeNS(XLINK_NS, "xlink:href", IMAGE_NOT_AVAILABLE);
+}
+
+// Construct the magnifier grid inside the #mag <svg> group: a backing rect
+// plus, for each zoomed pixel, two triangular <path> cells (upper triangle
+// shows Image 1, lower triangle Image 2 — see the #pixelhint help text).
+// Also starts the 500ms timer that flashes mismatching cells.
+function build_mag() {
+  var mag = ID("mag");
+
+  // Backing rectangle covering the whole magnified area.
+  var r = document.createElementNS(SVG_NS, "rect");
+  r.setAttribute("x", gMagZoom * -gMagWidth / 2);
+  r.setAttribute("y", gMagZoom * -gMagHeight / 2);
+  r.setAttribute("width", gMagZoom * gMagWidth);
+  r.setAttribute("height", gMagZoom * gMagHeight);
+  mag.appendChild(r);
+
+  // Shift the group so the loop's (0,0) cell lands at the top-left corner.
+  mag.setAttribute("transform", "translate(" + (gMagZoom * (gMagWidth / 2) + 1) + "," + (gMagZoom * (gMagHeight / 2) + 1) + ")");
+
+  for (var x = 0; x < gMagWidth; x++) {
+    gMagPixPaths[x] = [];
+    for (var y = 0; y < gMagHeight; y++) {
+      // Upper triangle of the cell (Image 1); grey placeholder fill until
+      // magnify() paints real pixel colors.
+      var p1 = document.createElementNS(SVG_NS, "path");
+      p1.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "h" + -gMagZoom + "v" + gMagZoom);
+      p1.setAttribute("stroke", "black");
+      p1.setAttribute("stroke-width", "1px");
+      p1.setAttribute("fill", "#aaa");
+
+      // Lower triangle of the cell (Image 2).
+      var p2 = document.createElementNS(SVG_NS, "path");
+      p2.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "v" + gMagZoom + "h" + -gMagZoom);
+      p2.setAttribute("stroke", "black");
+      p2.setAttribute("stroke-width", "1px");
+      p2.setAttribute("fill", "#888");
+
+      mag.appendChild(p1);
+      mag.appendChild(p2);
+      gMagPixPaths[x][y] = [p1, p2];
+    }
+  }
+
+  // Blink the mismatching cells (tracked in gFlashingPixels) twice a second.
+  var flashedOn = false;
+  setInterval(function() {
+    flashedOn = !flashedOn;
+    flash_pixels(flashedOn);
+  }, 500);
+}
+
+function show_phase(phaseid) {
+ for (var i in gPhases) {
+ var phase = gPhases[i];
+ phase.style.display = (phase.id == phaseid) ? "" : "none";
+ }
+
+ if (phase == "viewer")
+ ID("images").style.display = "none";
+}
+
+function fileentry_changed() {
+ show_phase("loading");
+ var input = ID("fileentry");
+ var files = input.files;
+ if (files.length > 0) {
+ // Only handle the first file; don't handle multiple selection.
+ // The parts of the log we care about are ASCII-only. Since we
+ // can ignore lines we don't care about, best to read in as
+ // iso-8859-1, which guarantees we don't get decoding errors.
+ var fileReader = new FileReader();
+ fileReader.onload = function(e) {
+ var log = null;
+
+ log = e.target.result;
+
+ if (log)
+ process_log(log);
+ else
+ show_phase("entry");
+ }
+ fileReader.readAsText(files[0], "iso-8859-1");
+ }
+ // So the user can process the same filename again (after
+ // overwriting the log), clear the value on the form input so we
+ // will always get an onchange event.
+ input.value = "";
+}
+
+function log_pasted() {
+ show_phase("loading");
+ var entry = ID("logentry");
+ var log = entry.value;
+ entry.value = "";
+ process_log(log);
+}
+
+var gTestItems;
+
+function process_log(contents) {
+ var lines = contents.split(/[\r\n]+/);
+ gTestItems = [];
+ for (var j in lines) {
+ var line = lines[j];
+ try {
+ var data = JSON.parse(line);
+ } catch(e) {
+ continue;
+ }
+ // Ignore duplicated output in logcat.
+ if (!data.action == "test_end" && data.status != "FAIL")
+ continue;
+
+ if (!data.hasOwnProperty("extra") ||
+ !data.extra.hasOwnProperty("reftest_screenshots")) {
+ continue;
+ }
+
+ var url = data.test;
+ var screenshots = data.extra.reftest_screenshots;
+ gTestItems.push(
+ {
+ pass: data.status === "PASS",
+ // only one of the following three should ever be true
+ unexpected: data.hasOwnProperty("expected"),
+ random: false,
+ skip: data.status == "SKIP",
+ url: url,
+ images: [],
+ imageLabels: []
+ });
+
+ var item = gTestItems[gTestItems.length - 1];
+ item.images.push("data:image/png;base64," + screenshots[0].screenshot);
+ item.imageLabels.push(screenshots[0].url);
+ if (screenshots.length > 1) {
+ item.images.push("data:image/png;base64," + screenshots[2].screenshot);
+ item.imageLabels.push(screenshots[2].url);
+ }
+ }
+ build_viewer();
+}
+
+// Rebuild the left-hand test list from gTestItems and switch to the viewer
+// phase. Falls back to the entry phase when the log produced no items.
+function build_viewer() {
+  if (gTestItems.length == 0) {
+    show_phase("entry");
+    return;
+  }
+
+  // Clear out any list left over from a previously loaded log.
+  var cell = ID("itemlist");
+  while (cell.childNodes.length > 0)
+    cell.removeChild(cell.childNodes[cell.childNodes.length - 1]);
+
+  var table = document.createElement("table");
+  var tbody = document.createElement("tbody");
+  table.appendChild(tbody);
+
+  for (var i in gTestItems) {
+    var item = gTestItems[i];
+
+    // optional url filter for only showing unexpected results
+    if (parseInt(gParams.only_show_unexpected) && !item.unexpected)
+      continue;
+
+    // XXX regardless skip expected pass items until we have filtering UI
+    if (item.pass && !item.unexpected)
+      continue;
+
+    var tr = document.createElement("tr");
+    // NOTE(review): rowclass is accumulated below but never assigned to the
+    // row; it only matters if the commented-out tr.pass/tr.fail CSS rules
+    // are re-enabled — confirm before removing.
+    var rowclass = item.pass ? "pass" : "fail";
+    var td;
+    var text;
+
+    // First column: result flags — "!" unexpected, "R" random, "S" skipped.
+    td = document.createElement("td");
+    text = "";
+    if (item.unexpected) { text += "!"; rowclass += " unexpected"; }
+    if (item.random) { text += "R"; rowclass += " random"; }
+    if (item.skip) { text += "S"; rowclass += " skip"; }
+    td.appendChild(document.createTextNode(text));
+    tr.appendChild(td);
+
+    // Second column: the test URL, linked when screenshots are available.
+    td = document.createElement("td");
+    td.id = "item" + i;
+    td.className = "url";
+    // Only display part of URL after "/mozilla/".
+    var match = item.url.match(/\/mozilla\/(.*)/);
+    text = document.createTextNode(match ? match[1] : item.url);
+    if (item.images.length > 0) {
+      var a = document.createElement("a");
+      a.href = "javascript:show_images(" + i + ")";
+      a.appendChild(text);
+      td.appendChild(a);
+    } else {
+      td.appendChild(text);
+    }
+    tr.appendChild(td);
+
+    tbody.appendChild(tr);
+  }
+
+  cell.appendChild(table);
+
+  show_phase("viewer");
+}
+
+function get_image_data(src, whenReady) {
+ var img = new Image();
+ img.onload = function() {
+ var canvas = document.createElement("canvas");
+ canvas.width = img.naturalWidth;
+ canvas.height = img.naturalHeight;
+
+ var ctx = canvas.getContext("2d");
+ ctx.drawImage(img, 0, 0);
+
+ whenReady(ctx.getImageData(0, 0, img.naturalWidth, img.naturalHeight));
+ };
+ img.src = src;
+}
+
+function sync_svg_size(imageData) {
+ // We need the size of the 'svg' and its 'image' elements to match the size
+ // of the ImageData objects that we're going to read pixels from or else our
+ // magnify() function will be very broken.
+ ID("svg").setAttribute("width", imageData.width);
+ ID("svg").setAttribute("height", imageData.height);
+}
+
+function show_images(i) {
+ var item = gTestItems[i];
+ var cell = ID("images");
+
+ // Remove activeitem class from any existing elements
+ var activeItems = document.querySelectorAll(".activeitem");
+ for (var activeItemIdx = activeItems.length; activeItemIdx-- != 0;) {
+ activeItems[activeItemIdx].classList.remove("activeitem");
+ }
+
+ ID("item" + i).classList.add("activeitem");
+ ID("image1").style.display = "";
+ ID("image2").style.display = "none";
+ ID("diffrect").style.display = "none";
+ ID("imgcontrols").reset();
+
+ ID("image1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]);
+ // Making the href be #image1 doesn't seem to work
+ ID("feimage1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]);
+ if (item.images.length == 1) {
+ ID("imgcontrols").style.display = "none";
+ } else {
+ ID("imgcontrols").style.display = "";
+
+ ID("image2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]);
+ // Making the href be #image2 doesn't seem to work
+ ID("feimage2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]);
+
+ ID("label1").textContent = 'Image ' + item.imageLabels[0];
+ ID("label2").textContent = 'Image ' + item.imageLabels[1];
+ }
+
+ cell.style.display = "";
+
+ get_image_data(item.images[0], function(data) { gImage1Data = data; sync_svg_size(gImage1Data); });
+ get_image_data(item.images[1], function(data) { gImage2Data = data });
+}
+
+function show_image(i) {
+ if (i == 1) {
+ ID("image1").style.display = "";
+ ID("image2").style.display = "none";
+ } else {
+ ID("image1").style.display = "none";
+ ID("image2").style.display = "";
+ }
+}
+
+function handle_keyboard_shortcut(event) {
+ switch (event.charCode) {
+ case 49: // "1" key
+ document.getElementById("radio1").checked = true;
+ show_image(1);
+ break;
+ case 50: // "2" key
+ document.getElementById("radio2").checked = true;
+ show_image(2);
+ break;
+ case 100: // "d" key
+ document.getElementById("differences").click();
+ break;
+ case 112: // "p" key
+ shift_images(-1);
+ break;
+ case 110: // "n" key
+ shift_images(1);
+ break;
+ }
+}
+
+// Move the selection to the previous (dir < 0) or next (dir > 0) test item
+// by synthesizing a click on the adjacent row's link.
+function shift_images(dir) {
+  var activeItem = document.querySelector(".activeitem");
+  if (!activeItem) {
+    return;
+  }
+  // Walk up from the active <td> to its enclosing row, then step to the
+  // sibling row. NOTE(review): tagName is compared against lowercase "tr",
+  // which relies on this being an XML (XHTML) document — confirm before
+  // reusing in an HTML context, where tagName is uppercased.
+  for (var elm = activeItem; elm; elm = elm.parentElement) {
+    if (elm.tagName != "tr") {
+      continue;
+    }
+    elm = dir > 0 ? elm.nextElementSibling : elm.previousElementSibling;
+    if (elm) {
+      elm.getElementsByTagName("a")[0].click();
+    }
+    return;
+  }
+}
+
+function show_differences(cb) {
+ ID("diffrect").style.display = cb.checked ? "" : "none";
+}
+
+function flash_pixels(on) {
+ var stroke = on ? "red" : "black";
+ var strokeWidth = on ? "2px" : "1px";
+ for (var i = 0; i < gFlashingPixels.length; i++) {
+ gFlashingPixels[i].setAttribute("stroke", stroke);
+ gFlashingPixels[i].setAttribute("stroke-width", strokeWidth);
+ }
+}
+
+function cursor_point(evt) {
+ var m = evt.target.getScreenCTM().inverse();
+ var p = ID("svg").createSVGPoint();
+ p.x = evt.clientX;
+ p.y = evt.clientY;
+ p = p.matrixTransform(m);
+ return { x: Math.floor(p.x), y: Math.floor(p.y) };
+}
+
+function hex2(i) {
+ return (i < 16 ? "0" : "") + i.toString(16);
+}
+
+function canvas_pixel_as_hex(data, x, y) {
+ var offset = (y * data.width + x) * 4;
+ var r = data.data[offset];
+ var g = data.data[offset + 1];
+ var b = data.data[offset + 2];
+ return "#" + hex2(r) + hex2(g) + hex2(b);
+}
+
+function hex_as_rgb(hex) {
+ return "rgb(" + [parseInt(hex.substring(1, 3), 16), parseInt(hex.substring(3, 5), 16), parseInt(hex.substring(5, 7), 16)] + ")";
+}
+
+// Mouse-move handler over the screenshots: repaint the magnifier grid with
+// the gMagWidth x gMagHeight neighborhood around the hovered pixel and
+// update the pixel-info readout for the center pixel.
+function magnify(evt) {
+  var { x: x, y: y } = cursor_point(evt);
+  var centerPixelColor1, centerPixelColor2;
+
+  // Offsets of the grid relative to the hovered pixel (center cell = 0,0).
+  var dx_lo = -Math.floor(gMagWidth / 2);
+  var dx_hi = Math.floor(gMagWidth / 2);
+  var dy_lo = -Math.floor(gMagHeight / 2);
+  var dy_hi = Math.floor(gMagHeight / 2);
+
+  // Stop flashing the previous mismatch set before rebuilding it.
+  flash_pixels(false);
+  gFlashingPixels = [];
+  for (var j = dy_lo; j <= dy_hi; j++) {
+    for (var i = dx_lo; i <= dx_hi; i++) {
+      var px = x + i;
+      var py = y + j;
+      var p1 = gMagPixPaths[i + dx_hi][j + dy_hi][0];
+      var p2 = gMagPixPaths[i + dx_hi][j + dy_hi][1];
+      // Here we just use the dimensions of gImage1Data since we expect test
+      // and reference to have the same dimensions.
+      if (px < 0 || py < 0 || px >= gImage1Data.width || py >= gImage1Data.height) {
+        // Out of bounds: restore the neutral placeholder fills.
+        p1.setAttribute("fill", "#aaa");
+        p2.setAttribute("fill", "#888");
+      } else {
+        var color1 = canvas_pixel_as_hex(gImage1Data, x + i, y + j);
+        var color2 = canvas_pixel_as_hex(gImage2Data, x + i, y + j);
+        p1.setAttribute("fill", color1);
+        p2.setAttribute("fill", color2);
+        if (color1 != color2) {
+          // Re-append mismatching cells so their flashed outlines paint on
+          // top of neighboring cells' borders.
+          gFlashingPixels.push(p1, p2);
+          p1.parentNode.appendChild(p1);
+          p2.parentNode.appendChild(p2);
+        }
+        if (i == 0 && j == 0) {
+          centerPixelColor1 = color1;
+          centerPixelColor2 = color2;
+        }
+      }
+    }
+  }
+  flash_pixels(true);
+  // NOTE(review): if the hovered pixel itself is out of bounds, the center
+  // colors stay undefined and hex_as_rgb() would throw — confirm whether
+  // the event geometry makes that reachable.
+  show_pixelinfo(x, y, centerPixelColor1, hex_as_rgb(centerPixelColor1), centerPixelColor2, hex_as_rgb(centerPixelColor2));
+}
+
+function show_pixelinfo(x, y, pix1rgb, pix1hex, pix2rgb, pix2hex) {
+ var pixelinfo = ID("pixelinfo");
+ ID("coords").textContent = [x, y];
+ ID("pix1hex").textContent = pix1hex;
+ ID("pix1rgb").textContent = pix1rgb;
+ ID("pix2hex").textContent = pix2hex;
+ ID("pix2rgb").textContent = pix2rgb;
+}
+
+ ]]></script>
+
+</head>
+<body onload="load()">
+
+<div id="entry">
+
+<h1>Reftest analyzer: load raw structured log</h1>
+
+<p>Either paste your log into this textarea:<br />
+<textarea cols="80" rows="10" id="logentry"/><br/>
+<input type="button" value="Process pasted log" onclick="log_pasted()" /></p>
+
+<p>... or load it from a file:<br/>
+<input type="file" id="fileentry" onchange="fileentry_changed()" />
+</p>
+</div>
+
+<div id="loading" style="display:none">Loading log...</div>
+
+<div id="viewer" style="display:none">
+ <div id="pixelarea">
+ <div id="pixelinfo">
+ <table>
+ <tbody>
+ <tr><th>Pixel at:</th><td colspan="2" id="coords"/></tr>
+ <tr><th>Image 1:</th><td id="pix1rgb"></td><td id="pix1hex"></td></tr>
+ <tr><th>Image 2:</th><td id="pix2rgb"></td><td id="pix2hex"></td></tr>
+ </tbody>
+ </table>
+ <div>
+ <div id="pixelhint">★
+ <div>
+ <p>Move the mouse over the reftest image on the right to show
+ magnified pixels on the left. The color information above is for
+ the pixel centered in the magnified view.</p>
+ <p>Image 1 is shown in the upper triangle of each pixel and Image 2
+ is shown in the lower triangle.</p>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div id="magnification">
+ <svg xmlns="http://www.w3.org/2000/svg" width="84" height="84" shape-rendering="optimizeSpeed">
+ <g id="mag"/>
+ </svg>
+ </div>
+ </div>
+ <div id="itemlist"></div>
+ <div id="images" style="display:none">
+ <form id="imgcontrols">
+ <input id="radio1" type="radio" name="which" value="0" onchange="show_image(1)" checked="checked" /><label id="label1" title="1" for="radio1">Image 1</label>
+ <input id="radio2" type="radio" name="which" value="1" onchange="show_image(2)" /><label id="label2" title="2" for="radio2">Image 2</label>
+ <label><input id="differences" type="checkbox" onchange="show_differences(this)" />Circle differences</label>
+ </form>
+ <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="800" height="1000" id="svg">
+ <defs>
+ <!-- use sRGB to avoid loss of data -->
+ <filter id="showDifferences" x="0%" y="0%" width="100%" height="100%"
+ style="color-interpolation-filters: sRGB">
+ <feImage id="feimage1" result="img1" xlink:href="#image1" />
+ <feImage id="feimage2" result="img2" xlink:href="#image2" />
+ <!-- inv1 and inv2 are the images with RGB inverted -->
+ <feComponentTransfer result="inv1" in="img1">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <feComponentTransfer result="inv2" in="img2">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <!-- w1 will have non-white pixels anywhere that img2
+ is brighter than img1, and w2 for the reverse.
+ It would be nice not to have to go through these
+ intermediate states, but feComposite
+ type="arithmetic" can't transform the RGB channels
+ and leave the alpha channel untouched. -->
+ <feComposite result="w1" in="img1" in2="inv2" operator="arithmetic" k2="1" k3="1" />
+ <feComposite result="w2" in="img2" in2="inv1" operator="arithmetic" k2="1" k3="1" />
+ <!-- c1 will have non-black pixels anywhere that img2
+ is brighter than img1, and c2 for the reverse -->
+ <feComponentTransfer result="c1" in="w1">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <feComponentTransfer result="c2" in="w2">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <!-- c will be nonblack (and fully on) for every pixel+component where there are differences -->
+ <feComposite result="c" in="c1" in2="c2" operator="arithmetic" k2="255" k3="255" />
+ <!-- a will be opaque for every pixel with differences and transparent for all others -->
+ <feColorMatrix result="a" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0" />
+
+ <!-- a, dilated by 1 pixel -->
+ <feMorphology result="dila1" in="a" operator="dilate" radius="1" />
+ <!-- a, dilated by 2 pixels -->
+ <feMorphology result="dila2" in="dila1" operator="dilate" radius="1" />
+
+ <!-- all the pixels in the 2-pixel dilation of a but not in the 1-pixel dilation, to highlight the diffs -->
+ <feComposite result="highlight" in="dila2" in2="dila1" operator="out" />
+
+ <feFlood result="red" flood-color="red" />
+ <feComposite result="redhighlight" in="red" in2="highlight" operator="in" />
+ <feFlood result="black" flood-color="black" flood-opacity="0.5" />
+ <feMerge>
+ <feMergeNode in="black" />
+ <feMergeNode in="redhighlight" />
+ </feMerge>
+ </filter>
+ </defs>
+ <g onmousemove="magnify(evt)">
+ <image x="0" y="0" width="100%" height="100%" id="image1" />
+ <image x="0" y="0" width="100%" height="100%" id="image2" />
+ </g>
+ <rect id="diffrect" filter="url(#showDifferences)" pointer-events="none" x="0" y="0" width="100%" height="100%" />
+ </svg>
+ </div>
+</div>
+
+</body>
+</html>
diff --git a/layout/tools/reftest/reftest-analyzer.xhtml b/layout/tools/reftest/reftest-analyzer.xhtml
new file mode 100644
index 000000000..64a50253e
--- /dev/null
+++ b/layout/tools/reftest/reftest-analyzer.xhtml
@@ -0,0 +1,647 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- -*- Mode: HTML; tab-width: 2; indent-tabs-mode: nil; -*- -->
+<!-- vim: set shiftwidth=2 tabstop=2 autoindent expandtab: -->
+<!-- This Source Code Form is subject to the terms of the Mozilla Public
+ - License, v. 2.0. If a copy of the MPL was not distributed with this
+ - file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
+<!--
+
+Features to add:
+* make the left and right parts of the viewer independently scrollable
+* make the test list filterable
+** default to only showing unexpecteds
+* add other ways to highlight differences other than circling?
+* add zoom/pan to images
+* Add ability to load log via XMLHttpRequest (also triggered via URL param)
+* color the test list based on pass/fail and expected/unexpected/random/skip
+* ability to load multiple logs ?
+** rename them by clicking on the name and editing
+** turn the test list into a collapsing tree view
+** move log loading into popup from viewer UI
+
+-->
+<!DOCTYPE html>
+<html lang="en-US" xml:lang="en-US" xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <title>Reftest analyzer</title>
+ <style type="text/css"><![CDATA[
+
+ html, body { margin: 0; }
+ html { padding: 0; }
+ body { padding: 4px; }
+
+ #pixelarea, #itemlist, #images { position: absolute; }
+ #itemlist, #images { overflow: auto; }
+ #pixelarea { top: 0; left: 0; width: 320px; height: 84px; overflow: visible }
+ #itemlist { top: 84px; left: 0; width: 320px; bottom: 0; }
+ #images { top: 0; bottom: 0; left: 320px; right: 0; }
+
+ #leftpane { width: 320px; }
+ #images { position: fixed; top: 10px; left: 340px; }
+
+ form#imgcontrols { margin: 0; display: block; }
+
+ #itemlist > table { border-collapse: collapse; }
+ #itemlist > table > tbody > tr > td { border: 1px solid; padding: 1px; }
+ #itemlist td.activeitem { background-color: yellow; }
+
+ /*
+ #itemlist > table > tbody > tr.pass > td.url { background: lime; }
+ #itemlist > table > tbody > tr.fail > td.url { background: red; }
+ */
+
+ #magnification > svg { display: block; width: 84px; height: 84px; }
+
+ #pixelinfo { font: small sans-serif; position: absolute; width: 200px; left: 84px; }
+ #pixelinfo table { border-collapse: collapse; }
+ #pixelinfo table th { white-space: nowrap; text-align: left; padding: 0; }
+ #pixelinfo table td { font-family: monospace; padding: 0 0 0 0.25em; }
+
+ #pixelhint { display: inline; color: #88f; cursor: help; }
+ #pixelhint > * { display: none; position: absolute; margin: 8px 0 0 8px; padding: 4px; width: 400px; background: #ffa; color: black; box-shadow: 3px 3px 2px #888; z-index: 1; }
+ #pixelhint:hover { color: #000; }
+ #pixelhint:hover > * { display: block; }
+ #pixelhint p { margin: 0; }
+ #pixelhint p + p { margin-top: 1em; }
+
+ ]]></style>
+ <script type="text/javascript"><![CDATA[
+
+var XLINK_NS = "http://www.w3.org/1999/xlink";
+var SVG_NS = "http://www.w3.org/2000/svg";
+var IMAGE_NOT_AVAILABLE = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKAAAAASCAYAAADczdVTAAAACXBIWXMAAAsTAAALEwEAmpwYAAAHy0lEQVRoge2aX2hb5xnGf2dYabROgqQkpMuKnWUJLmxHMFaa/SscteQiF5EvUgqLctEVrDJKK1+MolzkQr4IctgW+SLIheJc1BpFpswJw92FbaZsTCGTL0465AtntUekJdJ8lByVHbnnwLsLKbKdSJbiZBVjeuAYn+/P+z3fc97vfd9zbEVEhB566BK+1m0CPfx/o+eAPXQVbR3QqVapOl8FlR46h0O1Wu02iacCZfsasMKEz8vbx1JYE6fY/dXx6mEbFObPcvDVDBlznpc9G+2r8xNcvLqK2w39r4UI+fs7tFjmytgFFu718865EIebPGincI3zFz7Bcrtx97/GL0P+p+IPbSOgRwXtW3vpewqL/a/g5rgf39hit2m0hGUAHOHrrq3trmef4/lDB7Ay57n01zuPZXPX7jUunv+Yf9ktR7D/0CHca7/n3KXPsHbAuynkCWCZptgiImKLaVqP9NuW1bT9ceybpr3j+WJbYrVa3rbEatGZi2uixvWdrysilmWKae2M+5PqlktoosayLfubcrN10dAk24aynUsIxMVsadwUs+EX7dEyAlaXLqMoCj6fj5HkUqO9MD+Govjx+xXcXi+uoRAhvwuv182Z8Ws4AJUlxoZ8uNxuvF43ii/EtdXNNUuV68lR/IqC4gsxPj7KkE/BF5qmClRXrzFSt+/1ulDOjLNU6eQ4OcyPDqH4hhg5O4LicuN2K4xcvk6jjHUKJM8O1fvcKMoZkouFOq1VPp1OcuXGAvrvfsv0lWmSySTzN0sdH+jyYhK/ouB2e/G6XfjPJikBVG8SUhT8fl99nwVGfQp+vx+f4iO5VO1AtwJjfgXF58M/kqSVJP9ef0xuAI6NlwWmL41xxqeg+PyMXr72yBqW3cI4JaZHh1DcXrxeLy5liORiB7q1PiZFyeV0mQqz9TRZeUmFVUGLSjqdkgCIFp2RTCosEJOiiIihSyKWkDl9WYrFnCQCCNF0w0QmHhBQJTEzJ+nZSQmAoEYks2KIGBkJgASiM5I3LbGMnCSCCEQl38GJMvMZiag1e+nlFcmmIgKaZEwREaPGhWGZ1VfEMFZkNj4sgCSyhoihSzwSlqCGoAUlEo1IJByW+Oxyh+dZJJ+eklhiRnIrRcnrM6KCxLOmiNiipyICSGR2pTY2O1m7T2XEsNrrJmJLfjkn6amwoMbFaMEhG28eAVtzExErW3sOBCWVzkpmNiEqCOEZ2RyLTT3eJAKaMhVEUMOSXjHEtg3JTIUFkNTK9rGwbQrWm2xGb6QoWxIqEtdtEWO28aDtoi6JSFCAjUtL1AUzJA4SSW/IZ2VjjU0V0zEBJBiJSzwWk1g8IZEAAmrdidrBkoSKxB4IW08tGVNEzIxoIJM5a8v4SQ1RY5lGSy6x8xScz6QkHFBre1Zre49nH+y1KDEQLV7TcyU1LBCtHVppp9smxk2dYAMtHXA7blZWNJDZ4sZ4MxPbdHjrbc3WNuvOq4YlkYhLLBaXeKx2sLcrBUS2ScFtUbUBh3WgajvgOYgGuKjw4Rsqb1uvkssbWLbJXFQFqL/I9IEKa2WzYcqy16E2BNteB1R+cuwoRwcHGRx4nlfenWMuPclRDx3goSraqd+7Gj/Y5d76SrXLu3VKLYW1rMZbo/QpB4+9zt6fT1I0Law/LRMBaLzC7ePNuSgL7/2GpcotLr7+AZG5t9gH0Fa3zuFq1tiWG4DKs5tebV1NDDW1XYd26iWO9A8wODjAUfUN5ubm+Ch4ZFuuLRzQoVwqUCqXyN9fg3tFSuUShVIZhyr5O2vo94o42DwD/PP23fq8Bf5urLO+BoHBwxzc20c++wcmz+lAkWLFATwcf3+YDwIDhMYmuDw+wt5j5+C5ZwDYP/gSoLP6xX
5+fOIkJ47/lIP8g49/Nc3tDj59OZUiRR3uFYsAVO/eZoE1yvkyeA6gAaff+zU3SxUcp8LilQucnoFTP3hhix19/garlQqFW9eZOBti9Mqt9mubXwBw+NALeDC4cfVDzgP3i3keUN/nf4uo+hEver/DRaK84/9mY/72uoFTKVMolVn5/HPgPvlSmVKhRL2bSrlEqVyidH8N/d7t2u/lakfcKneLgM4rvxhncbXA6tI8kTffB+0NjnrAqZYplcrk83ceXdtzgB+psHD7S/pfPs7JkydQB1x8dnWS2SVje9GaxkVLl+DmNNC4NJn/S6JxH5nJyNRwrW7Qi7oMgxBMyd9molvmRKO1cExgshG6l9NTEhkOynAkLlOJoKBuhPV8ZlK0h9aNTqVbv3ltEK/VIiAQEN0yZVLbuM+aImLoEgts3VdsJrfFil1M1/ZSv9RAROaWO8n/hkyF1Q3bgeFGygvPrDRG5Wcf1IJbq9rlNrrNbra96aqlUVMSWrNnNiw5uw23T/4o4Xq7FtA29h2My3K9WtETgRZr13UxdIk+pGswkpCcsX0N2OZD9BOgWqFsgWePp20KWb0ywkDgEIa8y55Gq0O5XKHP7cGz++l/haxWylgOuD17aG7eoVpxwL27RX8b27jZ42n1qdahXKrg2bfnUW0eQ7edoD232l+/LPp2pHvNfh8eT2f8/3sO2AZLyRAvns6gqToLOgxP6Uz87HvdoNJDF9E1B6ysLrLw5yW+3PUNvv3dH/L9wX3doNFDl9E1B+yhB+j9O1YPXcZ/AAl9BWJNvZE7AAAAAElFTkSuQmCC";
+
+var gPhases = null;
+
+var gIDCache = {};
+
+var gMagPixPaths = []; // 2D array of array-of-two <path> objects used in the pixel magnifier
+var gMagWidth = 5; // number of zoomed in pixels to show horizontally
+var gMagHeight = 5; // number of zoomed in pixels to show vertically
+var gMagZoom = 16; // size of the zoomed in pixels
+var gImage1Data; // ImageData object for the reference image
+var gImage2Data; // ImageData object for the test output image
+var gFlashingPixels = []; // array of <path> objects that should be flashed due to pixel color mismatch
+var gParams;
+
+function ID(id) {
+ if (!(id in gIDCache))
+ gIDCache[id] = document.getElementById(id);
+ return gIDCache[id];
+}
+
+function hash_parameters() {
+ var result = { };
+ var params = window.location.hash.substr(1).split(/[&;]/);
+ for (var i = 0; i < params.length; i++) {
+ var parts = params[i].split("=");
+ result[parts[0]] = unescape(unescape(parts[1]));
+ }
+ return result;
+}
+
+function load() {
+ gPhases = [ ID("entry"), ID("loading"), ID("viewer") ];
+ build_mag();
+ gParams = hash_parameters();
+ if (gParams.log) {
+ show_phase("loading");
+ process_log(gParams.log);
+ } else if (gParams.logurl) {
+ show_phase("loading");
+ var req = new XMLHttpRequest();
+ req.onreadystatechange = function() {
+ if (req.readyState === 4) {
+ process_log(req.responseText);
+ }
+ };
+ req.open('GET', gParams.logurl, true);
+ req.send();
+ }
+ window.addEventListener('keypress', handle_keyboard_shortcut, false);
+ ID("image1").addEventListener('error', image_load_error, false);
+ ID("image2").addEventListener('error', image_load_error, false);
+}
+
+function image_load_error(e) {
+ e.target.setAttributeNS(XLINK_NS, "xlink:href", IMAGE_NOT_AVAILABLE);
+}
+
+function build_mag() {
+ var mag = ID("mag");
+
+ var r = document.createElementNS(SVG_NS, "rect");
+ r.setAttribute("x", gMagZoom * -gMagWidth / 2);
+ r.setAttribute("y", gMagZoom * -gMagHeight / 2);
+ r.setAttribute("width", gMagZoom * gMagWidth);
+ r.setAttribute("height", gMagZoom * gMagHeight);
+ mag.appendChild(r);
+
+ mag.setAttribute("transform", "translate(" + (gMagZoom * (gMagWidth / 2) + 1) + "," + (gMagZoom * (gMagHeight / 2) + 1) + ")");
+
+ for (var x = 0; x < gMagWidth; x++) {
+ gMagPixPaths[x] = [];
+ for (var y = 0; y < gMagHeight; y++) {
+ var p1 = document.createElementNS(SVG_NS, "path");
+ p1.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "h" + -gMagZoom + "v" + gMagZoom);
+ p1.setAttribute("stroke", "black");
+ p1.setAttribute("stroke-width", "1px");
+ p1.setAttribute("fill", "#aaa");
+
+ var p2 = document.createElementNS(SVG_NS, "path");
+ p2.setAttribute("d", "M" + ((x - gMagWidth / 2) + 1) * gMagZoom + "," + (y - gMagHeight / 2) * gMagZoom + "v" + gMagZoom + "h" + -gMagZoom);
+ p2.setAttribute("stroke", "black");
+ p2.setAttribute("stroke-width", "1px");
+ p2.setAttribute("fill", "#888");
+
+ mag.appendChild(p1);
+ mag.appendChild(p2);
+ gMagPixPaths[x][y] = [p1, p2];
+ }
+ }
+
+ var flashedOn = false;
+ setInterval(function() {
+ flashedOn = !flashedOn;
+ flash_pixels(flashedOn);
+ }, 500);
+}
+
+function show_phase(phaseid) {
+ for (var i in gPhases) {
+ var phase = gPhases[i];
+ phase.style.display = (phase.id == phaseid) ? "" : "none";
+ }
+
+ if (phaseid == "viewer")
+ ID("images").style.display = "none";
+}
+
+function fileentry_changed() {
+ show_phase("loading");
+ var input = ID("fileentry");
+ var files = input.files;
+ if (files.length > 0) {
+ // Only handle the first file; don't handle multiple selection.
+ // The parts of the log we care about are ASCII-only. Since we
+ // can ignore lines we don't care about, best to read in as
+ // iso-8859-1, which guarantees we don't get decoding errors.
+ var fileReader = new FileReader();
+ fileReader.onload = function(e) {
+ var log = null;
+
+ log = e.target.result;
+
+ if (log)
+ process_log(log);
+ else
+ show_phase("entry");
+ }
+ fileReader.readAsText(files[0], "iso-8859-1");
+ }
+ // So the user can process the same filename again (after
+ // overwriting the log), clear the value on the form input so we
+ // will always get an onchange event.
+ input.value = "";
+}
+
+function log_pasted() {
+ show_phase("loading");
+ var entry = ID("logentry");
+ var log = entry.value;
+ entry.value = "";
+ process_log(log);
+}
+
+var gTestItems;
+
+function process_log(contents) {
+ var lines = contents.split(/[\r\n]+/);
+ gTestItems = [];
+ for (var j in lines) {
+ var line = lines[j];
+ // Ignore duplicated output in logcat.
+ if (line.match(/I\/Gecko.*?REFTEST/))
+ continue;
+ var match = line.match(/^.*?REFTEST (.*)$/);
+ if (!match)
+ continue;
+ line = match[1];
+ match = line.match(/^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO)(\(EXPECTED RANDOM\)|) \| ([^\|]+) \|(.*)/);
+ if (match) {
+ var state = match[1];
+ var random = match[2];
+ var url = match[3];
+ var extra = match[4];
+ gTestItems.push(
+ {
+ pass: !state.match(/DEBUG-INFO$|FAIL$/),
+ // only one of the following three should ever be true
+ unexpected: !!state.match(/^TEST-UNEXPECTED/),
+ random: (random == "(EXPECTED RANDOM)"),
+ skip: (extra == " (SKIP)"),
+ url: url,
+ images: [],
+ imageLabels: []
+ });
+ continue;
+ }
+ match = line.match(/IMAGE([^:]*): (data:.*)$/);
+ if (match) {
+ var item = gTestItems[gTestItems.length - 1];
+ item.images.push(match[2]);
+ item.imageLabels.push(match[1]);
+ }
+ }
+
+ build_viewer();
+}
+
+function build_viewer() {
+ if (gTestItems.length == 0) {
+ show_phase("entry");
+ return;
+ }
+
+ var cell = ID("itemlist");
+ while (cell.childNodes.length > 0)
+ cell.removeChild(cell.childNodes[cell.childNodes.length - 1]);
+
+ var table = document.createElement("table");
+ var tbody = document.createElement("tbody");
+ table.appendChild(tbody);
+
+ for (var i in gTestItems) {
+ var item = gTestItems[i];
+
+ // optional url filter for only showing unexpected results
+ if (parseInt(gParams.only_show_unexpected) && !item.unexpected)
+ continue;
+
+ // XXX regardless skip expected pass items until we have filtering UI
+ if (item.pass && !item.unexpected)
+ continue;
+
+ var tr = document.createElement("tr");
+ var rowclass = item.pass ? "pass" : "fail";
+ var td;
+ var text;
+
+ td = document.createElement("td");
+ text = "";
+ if (item.unexpected) { text += "!"; rowclass += " unexpected"; }
+ if (item.random) { text += "R"; rowclass += " random"; }
+ if (item.skip) { text += "S"; rowclass += " skip"; }
+ td.appendChild(document.createTextNode(text));
+ tr.appendChild(td);
+
+ td = document.createElement("td");
+ td.id = "item" + i;
+ td.className = "url";
+ // Only display part of URL after "/mozilla/".
+ var match = item.url.match(/\/mozilla\/(.*)/);
+ text = document.createTextNode(match ? match[1] : item.url);
+ if (item.images.length > 0) {
+ var a = document.createElement("a");
+ a.href = "javascript:show_images(" + i + ")";
+ a.appendChild(text);
+ td.appendChild(a);
+ } else {
+ td.appendChild(text);
+ }
+ tr.appendChild(td);
+
+ tbody.appendChild(tr);
+ }
+
+ cell.appendChild(table);
+
+ show_phase("viewer");
+}
+
+function get_image_data(src, whenReady) {
+ var img = new Image();
+ img.onload = function() {
+ var canvas = document.createElement("canvas");
+ canvas.width = img.naturalWidth;
+ canvas.height = img.naturalHeight;
+
+ var ctx = canvas.getContext("2d");
+ ctx.drawImage(img, 0, 0);
+
+ whenReady(ctx.getImageData(0, 0, img.naturalWidth, img.naturalHeight));
+ };
+ img.src = src;
+}
+
+function sync_svg_size(imageData) {
+ // We need the size of the 'svg' and its 'image' elements to match the size
+ // of the ImageData objects that we're going to read pixels from or else our
+ // magnify() function will be very broken.
+ ID("svg").setAttribute("width", imageData.width);
+ ID("svg").setAttribute("height", imageData.height);
+}
+
+function show_images(i) {
+ var item = gTestItems[i];
+ var cell = ID("images");
+
+ // Remove activeitem class from any existing elements
+ var activeItems = document.querySelectorAll(".activeitem");
+ for (var activeItemIdx = activeItems.length; activeItemIdx-- != 0;) {
+ activeItems[activeItemIdx].classList.remove("activeitem");
+ }
+
+ ID("item" + i).classList.add("activeitem");
+ ID("image1").style.display = "";
+ ID("image2").style.display = "none";
+ ID("diffrect").style.display = "none";
+ ID("imgcontrols").reset();
+
+ ID("image1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]);
+ // Making the href be #image1 doesn't seem to work
+ ID("feimage1").setAttributeNS(XLINK_NS, "xlink:href", item.images[0]);
+ if (item.images.length == 1) {
+ ID("imgcontrols").style.display = "none";
+ } else {
+ ID("imgcontrols").style.display = "";
+
+ ID("image2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]);
+ // Making the href be #image2 doesn't seem to work
+ ID("feimage2").setAttributeNS(XLINK_NS, "xlink:href", item.images[1]);
+
+ ID("label1").textContent = 'Image ' + item.imageLabels[0];
+ ID("label2").textContent = 'Image ' + item.imageLabels[1];
+ }
+
+ cell.style.display = "";
+
+ get_image_data(item.images[0], function(data) { gImage1Data = data; sync_svg_size(gImage1Data); });
+ get_image_data(item.images[1], function(data) { gImage2Data = data });
+}
+
+function show_image(i) {
+ if (i == 1) {
+ ID("image1").style.display = "";
+ ID("image2").style.display = "none";
+ } else {
+ ID("image1").style.display = "none";
+ ID("image2").style.display = "";
+ }
+}
+
+function handle_keyboard_shortcut(event) {
+ switch (event.charCode) {
+ case 49: // "1" key
+ document.getElementById("radio1").checked = true;
+ show_image(1);
+ break;
+ case 50: // "2" key
+ document.getElementById("radio2").checked = true;
+ show_image(2);
+ break;
+ case 100: // "d" key
+ document.getElementById("differences").click();
+ break;
+ case 112: // "p" key
+ shift_images(-1);
+ break;
+ case 110: // "n" key
+ shift_images(1);
+ break;
+ }
+}
+
+function shift_images(dir) {
+ var activeItem = document.querySelector(".activeitem");
+ if (!activeItem) {
+ return;
+ }
+ for (var elm = activeItem; elm; elm = elm.parentElement) {
+ if (elm.tagName != "tr") {
+ continue;
+ }
+ elm = dir > 0 ? elm.nextElementSibling : elm.previousElementSibling;
+ if (elm) {
+ elm.getElementsByTagName("a")[0].click();
+ }
+ return;
+ }
+}
+
+function show_differences(cb) {
+ ID("diffrect").style.display = cb.checked ? "" : "none";
+}
+
+function flash_pixels(on) {
+ var stroke = on ? "red" : "black";
+ var strokeWidth = on ? "2px" : "1px";
+ for (var i = 0; i < gFlashingPixels.length; i++) {
+ gFlashingPixels[i].setAttribute("stroke", stroke);
+ gFlashingPixels[i].setAttribute("stroke-width", strokeWidth);
+ }
+}
+
+function cursor_point(evt) {
+ var m = evt.target.getScreenCTM().inverse();
+ var p = ID("svg").createSVGPoint();
+ p.x = evt.clientX;
+ p.y = evt.clientY;
+ p = p.matrixTransform(m);
+ return { x: Math.floor(p.x), y: Math.floor(p.y) };
+}
+
+function hex2(i) {
+ return (i < 16 ? "0" : "") + i.toString(16);
+}
+
+function canvas_pixel_as_hex(data, x, y) {
+ var offset = (y * data.width + x) * 4;
+ var r = data.data[offset];
+ var g = data.data[offset + 1];
+ var b = data.data[offset + 2];
+ return "#" + hex2(r) + hex2(g) + hex2(b);
+}
+
+function hex_as_rgb(hex) {
+ return "rgb(" + [parseInt(hex.substring(1, 3), 16), parseInt(hex.substring(3, 5), 16), parseInt(hex.substring(5, 7), 16)] + ")";
+}
+
+function magnify(evt) {
+ var { x: x, y: y } = cursor_point(evt);
+ var centerPixelColor1, centerPixelColor2;
+
+ var dx_lo = -Math.floor(gMagWidth / 2);
+ var dx_hi = Math.floor(gMagWidth / 2);
+ var dy_lo = -Math.floor(gMagHeight / 2);
+ var dy_hi = Math.floor(gMagHeight / 2);
+
+ flash_pixels(false);
+ gFlashingPixels = [];
+ for (var j = dy_lo; j <= dy_hi; j++) {
+ for (var i = dx_lo; i <= dx_hi; i++) {
+ var px = x + i;
+ var py = y + j;
+ var p1 = gMagPixPaths[i + dx_hi][j + dy_hi][0];
+ var p2 = gMagPixPaths[i + dx_hi][j + dy_hi][1];
+ // Here we just use the dimensions of gImage1Data since we expect test
+ // and reference to have the same dimensions.
+ if (px < 0 || py < 0 || px >= gImage1Data.width || py >= gImage1Data.height) {
+ p1.setAttribute("fill", "#aaa");
+ p2.setAttribute("fill", "#888");
+ } else {
+ var color1 = canvas_pixel_as_hex(gImage1Data, x + i, y + j);
+ var color2 = canvas_pixel_as_hex(gImage2Data, x + i, y + j);
+ p1.setAttribute("fill", color1);
+ p2.setAttribute("fill", color2);
+ if (color1 != color2) {
+ gFlashingPixels.push(p1, p2);
+ p1.parentNode.appendChild(p1);
+ p2.parentNode.appendChild(p2);
+ }
+ if (i == 0 && j == 0) {
+ centerPixelColor1 = color1;
+ centerPixelColor2 = color2;
+ }
+ }
+ }
+ }
+ flash_pixels(true);
+ show_pixelinfo(x, y, centerPixelColor1, hex_as_rgb(centerPixelColor1), centerPixelColor2, hex_as_rgb(centerPixelColor2));
+}
+
+function show_pixelinfo(x, y, pix1rgb, pix1hex, pix2rgb, pix2hex) {
+ var pixelinfo = ID("pixelinfo");
+ ID("coords").textContent = [x, y];
+ ID("pix1hex").textContent = pix1hex;
+ ID("pix1rgb").textContent = pix1rgb;
+ ID("pix2hex").textContent = pix2hex;
+ ID("pix2rgb").textContent = pix2rgb;
+}
+
+ ]]></script>
+
+</head>
+<body onload="load()">
+
+<div id="entry">
+
+<h1>Reftest analyzer: load reftest log</h1>
+
+<p>Either paste your log into this textarea:<br />
+<textarea cols="80" rows="10" id="logentry"/><br/>
+<input type="button" value="Process pasted log" onclick="log_pasted()" /></p>
+
+<p>... or load it from a file:<br/>
+<input type="file" id="fileentry" onchange="fileentry_changed()" />
+</p>
+</div>
+
+<div id="loading" style="display:none">Loading log...</div>
+
+<div id="viewer" style="display:none">
+ <div id="pixelarea">
+ <div id="pixelinfo">
+ <table>
+ <tbody>
+ <tr><th>Pixel at:</th><td colspan="2" id="coords"/></tr>
+ <tr><th>Image 1:</th><td id="pix1rgb"></td><td id="pix1hex"></td></tr>
+ <tr><th>Image 2:</th><td id="pix2rgb"></td><td id="pix2hex"></td></tr>
+ </tbody>
+ </table>
+ <div>
+ <div id="pixelhint">★
+ <div>
+ <p>Move the mouse over the reftest image on the right to show
+ magnified pixels on the left. The color information above is for
+ the pixel centered in the magnified view.</p>
+ <p>Image 1 is shown in the upper triangle of each pixel and Image 2
+ is shown in the lower triangle.</p>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div id="magnification">
+ <svg xmlns="http://www.w3.org/2000/svg" width="84" height="84" shape-rendering="optimizeSpeed">
+ <g id="mag"/>
+ </svg>
+ </div>
+ </div>
+ <div id="itemlist"></div>
+ <div id="images" style="display:none">
+ <form id="imgcontrols">
+ <input id="radio1" type="radio" name="which" value="0" onchange="show_image(1)" checked="checked" /><label id="label1" title="1" for="radio1">Image 1</label>
+ <input id="radio2" type="radio" name="which" value="1" onchange="show_image(2)" /><label id="label2" title="2" for="radio2">Image 2</label>
+ <label><input id="differences" type="checkbox" onchange="show_differences(this)" />Circle differences</label>
+ </form>
+ <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="800" height="1000" id="svg">
+ <defs>
+ <!-- use sRGB to avoid loss of data -->
+ <filter id="showDifferences" x="0%" y="0%" width="100%" height="100%"
+ style="color-interpolation-filters: sRGB">
+ <feImage id="feimage1" result="img1" xlink:href="#image1" />
+ <feImage id="feimage2" result="img2" xlink:href="#image2" />
+ <!-- inv1 and inv2 are the images with RGB inverted -->
+ <feComponentTransfer result="inv1" in="img1">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <feComponentTransfer result="inv2" in="img2">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <!-- w1 will have non-white pixels anywhere that img2
+ is brighter than img1, and w2 for the reverse.
+ It would be nice not to have to go through these
+ intermediate states, but feComposite
+ type="arithmetic" can't transform the RGB channels
+ and leave the alpha channel untouched. -->
+ <feComposite result="w1" in="img1" in2="inv2" operator="arithmetic" k2="1" k3="1" />
+ <feComposite result="w2" in="img2" in2="inv1" operator="arithmetic" k2="1" k3="1" />
+ <!-- c1 will have non-black pixels anywhere that img2
+ is brighter than img1, and c2 for the reverse -->
+ <feComponentTransfer result="c1" in="w1">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <feComponentTransfer result="c2" in="w2">
+ <feFuncR type="linear" slope="-1" intercept="1" />
+ <feFuncG type="linear" slope="-1" intercept="1" />
+ <feFuncB type="linear" slope="-1" intercept="1" />
+ </feComponentTransfer>
+ <!-- c will be nonblack (and fully on) for every pixel+component where there are differences -->
+ <feComposite result="c" in="c1" in2="c2" operator="arithmetic" k2="255" k3="255" />
+ <!-- a will be opaque for every pixel with differences and transparent for all others -->
+ <feColorMatrix result="a" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0" />
+
+ <!-- a, dilated by 1 pixel -->
+ <feMorphology result="dila1" in="a" operator="dilate" radius="1" />
+ <!-- a, dilated by 2 pixels -->
+ <feMorphology result="dila2" in="dila1" operator="dilate" radius="1" />
+
+ <!-- all the pixels in the 2-pixel dilation of a but not in the 1-pixel dilation, to highlight the diffs -->
+ <feComposite result="highlight" in="dila2" in2="dila1" operator="out" />
+
+ <feFlood result="red" flood-color="red" />
+ <feComposite result="redhighlight" in="red" in2="highlight" operator="in" />
+ <feFlood result="black" flood-color="black" flood-opacity="0.5" />
+ <feMerge>
+ <feMergeNode in="black" />
+ <feMergeNode in="redhighlight" />
+ </feMerge>
+ </filter>
+ </defs>
+ <g onmousemove="magnify(evt)">
+ <image x="0" y="0" width="100%" height="100%" id="image1" />
+ <image x="0" y="0" width="100%" height="100%" id="image2" />
+ </g>
+ <rect id="diffrect" filter="url(#showDifferences)" pointer-events="none" x="0" y="0" width="100%" height="100%" />
+ </svg>
+ </div>
+</div>
+
+</body>
+</html>
diff --git a/layout/tools/reftest/reftest-content.js b/layout/tools/reftest/reftest-content.js
new file mode 100644
index 000000000..f26cae8ef
--- /dev/null
+++ b/layout/tools/reftest/reftest-content.js
@@ -0,0 +1,1174 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 4 -*- */
+/* vim: set shiftwidth=4 tabstop=8 autoindent cindent expandtab: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var CC = Components.classes;
+const CI = Components.interfaces;
+const CR = Components.results;
+const CU = Components.utils;
+
+const XHTML_NS = "http://www.w3.org/1999/xhtml";
+
+const DEBUG_CONTRACTID = "@mozilla.org/xpcom/debug;1";
+const PRINTSETTINGS_CONTRACTID = "@mozilla.org/gfx/printsettings-service;1";
+const ENVIRONMENT_CONTRACTID = "@mozilla.org/process/environment;1";
+const NS_OBSERVER_SERVICE_CONTRACTID = "@mozilla.org/observer-service;1";
+const NS_GFXINFO_CONTRACTID = "@mozilla.org/gfx/info;1";
+
+// "<!--CLEAR-->"
+const BLANK_URL_FOR_CLEARING = "data:text/html;charset=UTF-8,%3C%21%2D%2DCLEAR%2D%2D%3E";
+
+CU.import("resource://gre/modules/Timer.jsm");
+CU.import("resource://gre/modules/AsyncSpellCheckTestHelper.jsm");
+
+var gBrowserIsRemote;
+var gHaveCanvasSnapshot = false;
+// Plugin layers can be updated asynchronously, so to make sure that all
+// layer surfaces have the right content, we need to listen for explicit
+// "MozPaintWait" and "MozPaintWaitFinished" events that signal when it's OK
+// to take snapshots. We cannot take a snapshot while the number of
+// "MozPaintWait" events fired exceeds the number of "MozPaintWaitFinished"
+// events fired. We count the number of such excess events here. When
+// the counter reaches zero we call gExplicitPendingPaintsCompleteHook.
+var gExplicitPendingPaintCount = 0;
+var gExplicitPendingPaintsCompleteHook;
+var gCurrentURL;
+var gCurrentTestType;
+var gTimeoutHook = null;
+var gFailureTimeout = null;
+var gFailureReason;
+var gAssertionCount = 0;
+var gTestCount = 0;
+
+var gDebug;
+var gVerbose = false;
+
+var gCurrentTestStartTime;
+var gClearingForAssertionCheck = false;
+
+const TYPE_LOAD = 'load'; // test without a reference (just test that it does
+ // not assert, crash, hang, or leak)
+const TYPE_SCRIPT = 'script'; // test contains individual test results
+
+function markupDocumentViewer() {
+ return docShell.contentViewer;
+}
+
+function webNavigation() {
+ return docShell.QueryInterface(CI.nsIWebNavigation);
+}
+
+function windowUtilsForWindow(w) {
+ return w.QueryInterface(CI.nsIInterfaceRequestor)
+ .getInterface(CI.nsIDOMWindowUtils);
+}
+
+function windowUtils() {
+ return windowUtilsForWindow(content);
+}
+
+function IDForEventTarget(event)
+{
+ try {
+ return "'" + event.target.getAttribute('id') + "'";
+ } catch (ex) {
+ return "<unknown>";
+ }
+}
+
+function PaintWaitListener(event)
+{
+ LogInfo("MozPaintWait received for ID " + IDForEventTarget(event));
+ gExplicitPendingPaintCount++;
+}
+
+function PaintWaitFinishedListener(event)
+{
+ LogInfo("MozPaintWaitFinished received for ID " + IDForEventTarget(event));
+ gExplicitPendingPaintCount--;
+ if (gExplicitPendingPaintCount < 0) {
+ LogWarning("Underrun in gExplicitPendingPaintCount\n");
+ gExplicitPendingPaintCount = 0;
+ }
+ if (gExplicitPendingPaintCount == 0 &&
+ gExplicitPendingPaintsCompleteHook) {
+ gExplicitPendingPaintsCompleteHook();
+ }
+}
+
+function OnInitialLoad()
+{
+#ifndef REFTEST_B2G
+ removeEventListener("load", OnInitialLoad, true);
+#endif
+
+ gDebug = CC[DEBUG_CONTRACTID].getService(CI.nsIDebug2);
+ var env = CC[ENVIRONMENT_CONTRACTID].getService(CI.nsIEnvironment);
+ gVerbose = !!env.get("MOZ_REFTEST_VERBOSE");
+
+ RegisterMessageListeners();
+
+ var initInfo = SendContentReady();
+ gBrowserIsRemote = initInfo.remote;
+
+ addEventListener("load", OnDocumentLoad, true);
+
+ addEventListener("MozPaintWait", PaintWaitListener, true);
+ addEventListener("MozPaintWaitFinished", PaintWaitFinishedListener, true);
+
+ LogInfo("Using browser remote="+ gBrowserIsRemote +"\n");
+}
+
+function SetFailureTimeout(cb, timeout)
+{
+ var targetTime = Date.now() + timeout;
+
+ var wrapper = function() {
+ // Timeouts can fire prematurely in some cases (e.g. in chaos mode). If this
+ // happens, set another timeout for the remaining time.
+ let remainingMs = targetTime - Date.now();
+ if (remainingMs > 0) {
+ SetFailureTimeout(cb, remainingMs);
+ } else {
+ cb();
+ }
+ }
+
+ gFailureTimeout = setTimeout(wrapper, timeout);
+}
+
+function StartTestURI(type, uri, timeout)
+{
+ // The GC is only able to clean up compartments after the CC runs. Since
+ // the JS ref tests disable the normal browser chrome and do not otherwise
+ // create substantial DOM garbage, the CC tends not to run enough normally.
+ ++gTestCount;
+ if (gTestCount % 1000 == 0) {
+ CU.forceGC();
+ CU.forceCC();
+ }
+
+ // Reset gExplicitPendingPaintCount in case there was a timeout or
+ // the count is out of sync for some other reason
+ if (gExplicitPendingPaintCount != 0) {
+ LogWarning("Resetting gExplicitPendingPaintCount to zero (currently " +
+ gExplicitPendingPaintCount + ")\n");
+ gExplicitPendingPaintCount = 0;
+ }
+
+ gCurrentTestType = type;
+ gCurrentURL = uri;
+
+ gCurrentTestStartTime = Date.now();
+ if (gFailureTimeout != null) {
+ SendException("program error managing timeouts\n");
+ }
+ SetFailureTimeout(LoadFailed, timeout);
+
+ LoadURI(gCurrentURL);
+}
+
+function setupFullZoom(contentRootElement) {
+ if (!contentRootElement || !contentRootElement.hasAttribute('reftest-zoom'))
+ return;
+ markupDocumentViewer().fullZoom =
+ contentRootElement.getAttribute('reftest-zoom');
+}
+
+function resetZoom() {
+ markupDocumentViewer().fullZoom = 1.0;
+}
+
+function doPrintMode(contentRootElement) {
+#if REFTEST_B2G
+ // nsIPrintSettings not available in B2G
+ return false;
+#else
+ // use getAttribute because className works differently in HTML and SVG
+ return contentRootElement &&
+ contentRootElement.hasAttribute('class') &&
+ contentRootElement.getAttribute('class').split(/\s+/)
+ .indexOf("reftest-print") != -1;
+#endif
+}
+
+function setupPrintMode() {
+ var PSSVC =
+ CC[PRINTSETTINGS_CONTRACTID].getService(CI.nsIPrintSettingsService);
+ var ps = PSSVC.newPrintSettings;
+ ps.paperWidth = 5;
+ ps.paperHeight = 3;
+
+ // Override any os-specific unwriteable margins
+ ps.unwriteableMarginTop = 0;
+ ps.unwriteableMarginLeft = 0;
+ ps.unwriteableMarginBottom = 0;
+ ps.unwriteableMarginRight = 0;
+
+ ps.headerStrLeft = "";
+ ps.headerStrCenter = "";
+ ps.headerStrRight = "";
+ ps.footerStrLeft = "";
+ ps.footerStrCenter = "";
+ ps.footerStrRight = "";
+ docShell.contentViewer.setPageMode(true, ps);
+}
+
+function attrOrDefault(element, attr, def) {
+ return element.hasAttribute(attr) ? Number(element.getAttribute(attr)) : def;
+}
+
+function setupViewport(contentRootElement) {
+ if (!contentRootElement) {
+ return;
+ }
+
+ var sw = attrOrDefault(contentRootElement, "reftest-scrollport-w", 0);
+ var sh = attrOrDefault(contentRootElement, "reftest-scrollport-h", 0);
+ if (sw !== 0 || sh !== 0) {
+ LogInfo("Setting scrollport to <w=" + sw + ", h=" + sh + ">");
+ windowUtils().setScrollPositionClampingScrollPortSize(sw, sh);
+ }
+
+ // XXX support resolution when needed
+
+ // XXX support viewconfig when needed
+}
+
// Apply reftest-displayport-{x,y,w,h} attributes by configuring a displayport
// on the tagged element(s).  With reftest-async-scroll on the root, the whole
// subtree (including subdocuments) is walked; otherwise only the root element
// itself is considered.
function setupDisplayport(contentRootElement) {
    if (!contentRootElement) {
        return;
    }

    // Set a displayport on |element| if it carries any non-default
    // reftest-displayport-* attribute.
    function setupDisplayportForElement(element, winUtils) {
        var dpw = attrOrDefault(element, "reftest-displayport-w", 0);
        var dph = attrOrDefault(element, "reftest-displayport-h", 0);
        var dpx = attrOrDefault(element, "reftest-displayport-x", 0);
        var dpy = attrOrDefault(element, "reftest-displayport-y", 0);
        if (dpw !== 0 || dph !== 0 || dpx != 0 || dpy != 0) {
            LogInfo("Setting displayport to <x="+ dpx +", y="+ dpy +", w="+ dpw +", h="+ dph +">");
            winUtils.setDisplayPortForElement(dpx, dpy, dpw, dph, element, 1);
        }
    }

    // Depth-first walk over |element|'s subtree, descending into any
    // subdocument (iframe) with that subdocument's own window utils.
    function setupDisplayportForElementSubtree(element, winUtils) {
        setupDisplayportForElement(element, winUtils);
        for (var c = element.firstElementChild; c; c = c.nextElementSibling) {
            setupDisplayportForElementSubtree(c, winUtils);
        }
        if (element.contentDocument) {
            LogInfo("Descending into subdocument");
            setupDisplayportForElementSubtree(element.contentDocument.documentElement,
                                              windowUtilsForWindow(element.contentWindow));
        }
    }

    if (contentRootElement.hasAttribute("reftest-async-scroll")) {
        setupDisplayportForElementSubtree(contentRootElement, windowUtils());
    } else {
        setupDisplayportForElement(contentRootElement, windowUtils());
    }
}
+
// Apply reftest-async-scroll-{x,y} attributes (only when the root carries
// reftest-async-scroll) by setting APZ async scroll offsets on the tagged
// elements, descending into subdocuments.  Returns whether any offsets were
// updated.
function setupAsyncScrollOffsets(options) {
    var currentDoc = content.document;
    var contentRootElement = currentDoc ? currentDoc.documentElement : null;

    if (!contentRootElement) {
        return false;
    }

    // Returns true if an async scroll offset was actually applied to
    // |element|.  Failures are swallowed when options.allowFailure is set.
    function setupAsyncScrollOffsetsForElement(element, winUtils) {
        var sx = attrOrDefault(element, "reftest-async-scroll-x", 0);
        var sy = attrOrDefault(element, "reftest-async-scroll-y", 0);
        if (sx != 0 || sy != 0) {
            try {
                // This might fail when called from RecordResult since layers
                // may not have been constructed yet
                winUtils.setAsyncScrollOffset(element, sx, sy);
                return true;
            } catch (e) {
                if (!options.allowFailure) {
                    throw e;
                }
            }
        }
        return false;
    }

    // Recursive subtree walk; accumulates whether any descendant (including
    // elements inside subdocuments) had its offset updated.
    function setupAsyncScrollOffsetsForElementSubtree(element, winUtils) {
        var updatedAny = setupAsyncScrollOffsetsForElement(element, winUtils);
        for (var c = element.firstElementChild; c; c = c.nextElementSibling) {
            if (setupAsyncScrollOffsetsForElementSubtree(c, winUtils)) {
                updatedAny = true;
            }
        }
        if (element.contentDocument) {
            LogInfo("Descending into subdocument (async offsets)");
            if (setupAsyncScrollOffsetsForElementSubtree(element.contentDocument.documentElement,
                                                         windowUtilsForWindow(element.contentWindow))) {
                updatedAny = true;
            }
        }
        return updatedAny;
    }

    var asyncScroll = contentRootElement.hasAttribute("reftest-async-scroll");
    if (asyncScroll) {
        return setupAsyncScrollOffsetsForElementSubtree(contentRootElement, windowUtils());
    }
    return false;
}
+
// Apply the zoom requested via the reftest-async-zoom attribute on the root
// element, if any.  Returns true when an async zoom was actually set.
function setupAsyncZoom(options) {
    var doc = content.document;
    var root = doc ? doc.documentElement : null;

    if (!root || !root.hasAttribute('reftest-async-zoom')) {
        return false;
    }

    var zoom = attrOrDefault(root, "reftest-async-zoom", 1);
    if (zoom == 1) {
        return false;
    }

    try {
        // May throw if the compositor-side machinery isn't ready yet.
        windowUtils().setAsyncZoom(root, zoom);
        return true;
    } catch (e) {
        if (!options.allowFailure) {
            throw e;
        }
    }
    return false;
}
+
+
// Intentionally a no-op: displayport configuration lives on the presshell and
// so is "reset" on navigation when we get a new presshell.
function resetDisplayportAndViewport() {
    // XXX currently the displayport configuration lives on the
    // presshell and so is "reset" on nav when we get a new presshell.
}
+
// True while test content holds at least one explicit paint wait
// (gExplicitPendingPaintCount) outstanding.
function shouldWaitForExplicitPaintWaiters() {
    return gExplicitPendingPaintCount > 0;
}
+
// True when a MozAfterPaint is still pending AND we actually have a canvas
// snapshot to keep up to date.
function shouldWaitForPendingPaints() {
    // if gHaveCanvasSnapshot is false, we're not taking snapshots so
    // there is no need to wait for pending paints to be flushed.
    return gHaveCanvasSnapshot && windowUtils().isMozAfterPaintPending;
}
+
function shouldWaitForReftestWaitRemoval(contentRootElement) {
    // Inspect the class attribute directly: className behaves differently on
    // HTML and SVG roots.  Match "reftest-wait" as a whole whitespace-
    // delimited token.
    return contentRootElement &&
           contentRootElement.hasAttribute('class') &&
           /(^|\s)reftest-wait(\s|$)/.test(
               contentRootElement.getAttribute('class'));
}
+
function shouldSnapshotWholePage(contentRootElement) {
    // Inspect the class attribute directly: className behaves differently on
    // HTML and SVG roots.  Match "reftest-snapshot-all" as a whole token.
    return contentRootElement &&
           contentRootElement.hasAttribute('class') &&
           /(^|\s)reftest-snapshot-all(\s|$)/.test(
               contentRootElement.getAttribute('class'));
}
+
function getNoPaintElements(contentRootElement) {
    // Live collection of elements that must NOT be repainted during the test.
    var className = 'reftest-no-paint';
    return contentRootElement.getElementsByClassName(className);
}
+
function getOpaqueLayerElements(contentRootElement) {
    // Live collection of elements that must end up in an opaque layer.
    var className = 'reftest-opaque-layer';
    return contentRootElement.getElementsByClassName(className);
}
+
function getAssignedLayerMap(contentRootElement) {
    // Group the elements carrying a reftest-assigned-layer attribute by their
    // layer name.  A plain object (not a Map) is returned because callers
    // iterate it with for-in.
    var map = {};
    var tagged = contentRootElement.querySelectorAll('[reftest-assigned-layer]');
    Array.prototype.forEach.call(tagged, function(element) {
        var layerName = element.getAttribute('reftest-assigned-layer');
        if (!map.hasOwnProperty(layerName)) {
            map[layerName] = [];
        }
        map[layerName].push(element);
    });
    return map;
}
+
// States of the WaitForTestEnd state machine, advanced in order by
// MakeProgress().
//
// Initial state. When the document has loaded and all MozAfterPaint events and
// all explicit paint waits are flushed, we can fire the MozReftestInvalidate
// event and move to the next state.
const STATE_WAITING_TO_FIRE_INVALIDATE_EVENT = 0;
// When reftest-wait has been removed from the root element, we can move to the
// next state.
const STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL = 1;
// When spell checking is done on all spell-checked elements, we can move to the
// next state.
const STATE_WAITING_FOR_SPELL_CHECKS = 2;
// When any pending compositor-side repaint requests have been flushed, we can
// move to the next state.
const STATE_WAITING_FOR_APZ_FLUSH = 3;
// When all MozAfterPaint events and all explicit paint waits are flushed, we're
// done and can move to the COMPLETED state.
const STATE_WAITING_TO_FINISH = 4;
const STATE_COMPLETED = 5;
+
// Flush pending style/layout in the content window and every descendant
// frame.  Flushing may itself schedule new paints; if a descendant picks up a
// pending MozAfterPaint the root document is expected to have one too, and we
// warn when that invariant is violated.
function FlushRendering() {
    var anyPendingPaintsGeneratedInDescendants = false;

    function flushWindow(win) {
        var utils = win.QueryInterface(CI.nsIInterfaceRequestor)
                    .getInterface(CI.nsIDOMWindowUtils);
        var afterPaintWasPending = utils.isMozAfterPaintPending;

        var root = win.document.documentElement;
        // Tests tagged reftest-no-flush opt out of the forced layout flush.
        if (root && !root.classList.contains("reftest-no-flush")) {
            try {
                // Flush pending restyles and reflows for this window
                root.getBoundingClientRect();
            } catch (e) {
                LogWarning("flushWindow failed: " + e + "\n");
            }
        }

        // Detect paints newly generated by the flush above.
        if (!afterPaintWasPending && utils.isMozAfterPaintPending) {
            LogInfo("FlushRendering generated paint for window " + win.location.href);
            anyPendingPaintsGeneratedInDescendants = true;
        }

        for (var i = 0; i < win.frames.length; ++i) {
            flushWindow(win.frames[i]);
        }
    }

    flushWindow(content);

    if (anyPendingPaintsGeneratedInDescendants &&
        !windowUtils().isMozAfterPaintPending) {
        LogWarning("Internal error: descendant frame generated a MozAfterPaint event, but the root document doesn't have one!");
    }
}
+
// Drive a reftest-wait style test to completion.  Installs paint/attribute/
// spell-check listeners, then repeatedly calls MakeProgress() to advance the
// STATE_* machine until the test is ready to snapshot, at which point
// RecordResult() is scheduled.
//
// contentRootElement    - root element of the test document (may be null)
// inPrintMode           - whether print mode was already set up by the caller
// spellCheckedElements  - elements whose spell check we must wait for
function WaitForTestEnd(contentRootElement, inPrintMode, spellCheckedElements) {
    var stopAfterPaintReceived = false;
    var currentDoc = content.document;
    var state = STATE_WAITING_TO_FIRE_INVALIDATE_EVENT;

    function AfterPaintListener(event) {
        LogInfo("AfterPaintListener in " + event.target.document.location.href);
        if (event.target.document != currentDoc) {
            // ignore paint events for subframes or old documents in the window.
            // Invalidation in subframes will cause invalidation in the toplevel document anyway.
            return;
        }

        SendUpdateCanvasForEvent(event, contentRootElement);
        // These events are fired immediately after a paint. Don't
        // confuse ourselves by firing synchronously if we triggered the
        // paint ourselves.
        setTimeout(MakeProgress, 0);
    }

    function AttrModifiedListener() {
        LogInfo("AttrModifiedListener fired");
        // Wait for the next return-to-event-loop before continuing --- for
        // example, the attribute may have been modified in an subdocument's
        // load event handler, in which case we need load event processing
        // to complete and unsuppress painting before we check isMozAfterPaintPending.
        setTimeout(MakeProgress, 0);
    }

    function ExplicitPaintsCompleteListener() {
        LogInfo("ExplicitPaintsCompleteListener fired");
        // Since this can fire while painting, don't confuse ourselves by
        // firing synchronously. It's fine to do this asynchronously.
        setTimeout(MakeProgress, 0);
    }

    function RemoveListeners() {
        // OK, we can end the test now.
        removeEventListener("MozAfterPaint", AfterPaintListener, false);
        if (contentRootElement) {
            contentRootElement.removeEventListener("DOMAttrModified", AttrModifiedListener, false);
        }
        gExplicitPendingPaintsCompleteHook = null;
        gTimeoutHook = null;
        // Make sure we're in the COMPLETED state just in case
        // (this may be called via the test-timeout hook)
        state = STATE_COMPLETED;
    }

    // Everything that could cause shouldWaitForXXX() to
    // change from returning true to returning false is monitored via some kind
    // of event listener which eventually calls this function.
    function MakeProgress() {
        if (state >= STATE_COMPLETED) {
            LogInfo("MakeProgress: STATE_COMPLETED");
            return;
        }

        FlushRendering();

        switch (state) {
        case STATE_WAITING_TO_FIRE_INVALIDATE_EVENT: {
            LogInfo("MakeProgress: STATE_WAITING_TO_FIRE_INVALIDATE_EVENT");
            if (shouldWaitForExplicitPaintWaiters() || shouldWaitForPendingPaints()) {
                gFailureReason = "timed out waiting for pending paint count to reach zero";
                if (shouldWaitForExplicitPaintWaiters()) {
                    gFailureReason += " (waiting for MozPaintWaitFinished)";
                    LogInfo("MakeProgress: waiting for MozPaintWaitFinished");
                }
                if (shouldWaitForPendingPaints()) {
                    gFailureReason += " (waiting for MozAfterPaint)";
                    LogInfo("MakeProgress: waiting for MozAfterPaint");
                }
                return;
            }

            state = STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL;
            var hasReftestWait = shouldWaitForReftestWaitRemoval(contentRootElement);
            // Notify the test document that now is a good time to test some invalidation
            LogInfo("MakeProgress: dispatching MozReftestInvalidate");
            if (contentRootElement) {
                // Clear painted-state on reftest-no-paint elements so any
                // paint they receive after this point is a failure.
                var elements = getNoPaintElements(contentRootElement);
                for (var i = 0; i < elements.length; ++i) {
                    windowUtils().checkAndClearPaintedState(elements[i]);
                }
                var notification = content.document.createEvent("Events");
                notification.initEvent("MozReftestInvalidate", true, false);
                contentRootElement.dispatchEvent(notification);
            }

            if (!inPrintMode && doPrintMode(contentRootElement)) {
                LogInfo("MakeProgress: setting up print mode");
                setupPrintMode();
            }

            if (hasReftestWait && !shouldWaitForReftestWaitRemoval(contentRootElement)) {
                // MozReftestInvalidate handler removed reftest-wait.
                // We expect something to have been invalidated...
                FlushRendering();
                if (!shouldWaitForPendingPaints() && !shouldWaitForExplicitPaintWaiters()) {
                    LogWarning("MozInvalidateEvent didn't invalidate");
                }
            }
            // Try next state
            MakeProgress();
            return;
        }

        case STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL:
            LogInfo("MakeProgress: STATE_WAITING_FOR_REFTEST_WAIT_REMOVAL");
            if (shouldWaitForReftestWaitRemoval(contentRootElement)) {
                gFailureReason = "timed out waiting for reftest-wait to be removed";
                LogInfo("MakeProgress: waiting for reftest-wait to be removed");
                return;
            }

            // Try next state
            state = STATE_WAITING_FOR_SPELL_CHECKS;
            MakeProgress();
            return;

        case STATE_WAITING_FOR_SPELL_CHECKS:
            LogInfo("MakeProgress: STATE_WAITING_FOR_SPELL_CHECKS");
            if (numPendingSpellChecks) {
                gFailureReason = "timed out waiting for spell checks to end";
                LogInfo("MakeProgress: waiting for spell checks to end");
                return;
            }

            state = STATE_WAITING_FOR_APZ_FLUSH;
            LogInfo("MakeProgress: STATE_WAITING_FOR_APZ_FLUSH");
            gFailureReason = "timed out waiting for APZ flush to complete";

            var os = CC[NS_OBSERVER_SERVICE_CONTRACTID].getService(CI.nsIObserverService);
            var flushWaiter = function(aSubject, aTopic, aData) {
                if (aTopic) LogInfo("MakeProgress: apz-repaints-flushed fired");
                os.removeObserver(flushWaiter, "apz-repaints-flushed");
                state = STATE_WAITING_TO_FINISH;
                MakeProgress();
            };
            os.addObserver(flushWaiter, "apz-repaints-flushed", false);

            // Only request an APZ flush when we will actually snapshot and
            // the test has not opted out via reftest-no-flush.
            var willSnapshot = (gCurrentTestType != TYPE_SCRIPT) &&
                               (gCurrentTestType != TYPE_LOAD);
            var noFlush =
                !(contentRootElement &&
                  contentRootElement.classList.contains("reftest-no-flush"));
            if (noFlush && willSnapshot && windowUtils().flushApzRepaints()) {
                LogInfo("MakeProgress: done requesting APZ flush");
            } else {
                // No flush needed; advance the observer by hand.
                LogInfo("MakeProgress: APZ flush not required");
                flushWaiter(null, null, null);
            }
            return;

        case STATE_WAITING_FOR_APZ_FLUSH:
            LogInfo("MakeProgress: STATE_WAITING_FOR_APZ_FLUSH");
            // Nothing to do here; once we get the apz-repaints-flushed event
            // we will go to STATE_WAITING_TO_FINISH
            return;

        case STATE_WAITING_TO_FINISH:
            LogInfo("MakeProgress: STATE_WAITING_TO_FINISH");
            if (shouldWaitForExplicitPaintWaiters() || shouldWaitForPendingPaints()) {
                gFailureReason = "timed out waiting for pending paint count to " +
                    "reach zero (after reftest-wait removed and switch to print mode)";
                if (shouldWaitForExplicitPaintWaiters()) {
                    gFailureReason += " (waiting for MozPaintWaitFinished)";
                    LogInfo("MakeProgress: waiting for MozPaintWaitFinished");
                }
                if (shouldWaitForPendingPaints()) {
                    gFailureReason += " (waiting for MozAfterPaint)";
                    LogInfo("MakeProgress: waiting for MozAfterPaint");
                }
                return;
            }
            if (contentRootElement) {
                // Any reftest-no-paint element painted since the invalidate
                // event is a test failure.
                var elements = getNoPaintElements(contentRootElement);
                for (var i = 0; i < elements.length; ++i) {
                    if (windowUtils().checkAndClearPaintedState(elements[i])) {
                        SendFailedNoPaint();
                    }
                }
                CheckLayerAssertions(contentRootElement);
            }
            LogInfo("MakeProgress: Completed");
            state = STATE_COMPLETED;
            gFailureReason = "timed out while taking snapshot (bug in harness?)";
            RemoveListeners();
            // NOTE(review): called with no argument, so the callee sees
            // undefined — confirm the process-crash expectation is meant to
            // be skipped here (compare the call in AfterOnLoadScripts).
            CheckForProcessCrashExpectation();
            setTimeout(RecordResult, 0);
            return;
        }
    }

    LogInfo("WaitForTestEnd: Adding listeners");
    addEventListener("MozAfterPaint", AfterPaintListener, false);
    // If contentRootElement is null then shouldWaitForReftestWaitRemoval will
    // always return false so we don't need a listener anyway
    if (contentRootElement) {
        contentRootElement.addEventListener("DOMAttrModified", AttrModifiedListener, false);
    }
    gExplicitPendingPaintsCompleteHook = ExplicitPaintsCompleteListener;
    gTimeoutHook = RemoveListeners;

    // Listen for spell checks on spell-checked elements.
    var numPendingSpellChecks = spellCheckedElements.length;
    function decNumPendingSpellChecks() {
        --numPendingSpellChecks;
        MakeProgress();
    }
    for (let editable of spellCheckedElements) {
        try {
            onSpellCheck(editable, decNumPendingSpellChecks);
        } catch (err) {
            // The element may not have an editor, so ignore it.
            setTimeout(decNumPendingSpellChecks, 0);
        }
    }

    // Take a full snapshot now that all our listeners are set up. This
    // ensures it's impossible for us to miss updates between taking the snapshot
    // and adding our listeners.
    SendInitCanvasWithSnapshot();
    MakeProgress();
}
+
// Capturing-phase load handler for the content being tested.  Filters out
// loads we don't care about (subframes, stale documents, the blank page used
// for assertion checks), applies per-test zoom/viewport/displayport setup,
// then either enters WaitForTestEnd() or records the result after onload
// scripts have run.
function OnDocumentLoad(event)
{
    var currentDoc = content.document;
    if (event.target != currentDoc)
        // Ignore load events for subframes.
        return;

    // The blank page load that precedes an assertion-count check.
    if (gClearingForAssertionCheck &&
        currentDoc.location.href == BLANK_URL_FOR_CLEARING) {
        DoAssertionCheck();
        return;
    }

    if (currentDoc.location.href != gCurrentURL) {
        LogInfo("OnDocumentLoad fired for previous document");
        // Ignore load events for previous documents.
        return;
    }

    // Collect all editable, spell-checked elements. It may be the case that
    // not all the elements that match this selector will be spell checked: for
    // example, a textarea without a spellcheck attribute may have a parent with
    // spellcheck=false, or script may set spellcheck=false on an element whose
    // markup sets it to true. But that's OK since onSpellCheck detects the
    // absence of spell checking, too.
    var querySelector =
        '*[class~="spell-checked"],' +
        'textarea:not([spellcheck="false"]),' +
        'input[spellcheck]:-moz-any([spellcheck=""],[spellcheck="true"]),' +
        '*[contenteditable]:-moz-any([contenteditable=""],[contenteditable="true"])';
    var spellCheckedElements = currentDoc.querySelectorAll(querySelector);

    var contentRootElement = currentDoc ? currentDoc.documentElement : null;
    currentDoc = null;
    setupFullZoom(contentRootElement);
    setupViewport(contentRootElement);
    setupDisplayport(contentRootElement);
    var inPrintMode = false;

    function AfterOnLoadScripts() {
        // Regrab the root element, because the document may have changed.
        var contentRootElement =
          content.document ? content.document.documentElement : null;

        // Flush the document in case it got modified in a load event handler.
        FlushRendering();

        // Take a snapshot now. We need to do this before we check whether
        // we should wait, since this might trigger dispatching of
        // MozPaintWait events and make shouldWaitForExplicitPaintWaiters() true
        // below.
        var painted = SendInitCanvasWithSnapshot();

        if (shouldWaitForExplicitPaintWaiters() ||
            (!inPrintMode && doPrintMode(contentRootElement)) ||
            // If we didn't force a paint above, in
            // InitCurrentCanvasWithSnapshot, so we should wait for a
            // paint before we consider them done.
            !painted) {
            LogInfo("AfterOnLoadScripts belatedly entering WaitForTestEnd");
            // Go into reftest-wait mode belatedly.
            WaitForTestEnd(contentRootElement, inPrintMode, []);
        } else {
            CheckLayerAssertions(contentRootElement);
            CheckForProcessCrashExpectation(contentRootElement);
            RecordResult();
        }
    }

    if (shouldWaitForReftestWaitRemoval(contentRootElement) ||
        shouldWaitForExplicitPaintWaiters() ||
        spellCheckedElements.length) {
        // Go into reftest-wait mode immediately after painting has been
        // unsuppressed, after the onload event has finished dispatching.
        gFailureReason = "timed out waiting for test to complete (trying to get into WaitForTestEnd)";
        LogInfo("OnDocumentLoad triggering WaitForTestEnd");
        setTimeout(function () { WaitForTestEnd(contentRootElement, inPrintMode, spellCheckedElements); }, 0);
    } else {
        if (doPrintMode(contentRootElement)) {
            LogInfo("OnDocumentLoad setting up print mode");
            setupPrintMode();
            inPrintMode = true;
        }

        // Since we can't use a bubbling-phase load listener from chrome,
        // this is a capturing phase listener. So do setTimeout twice, the
        // first to get us after the onload has fired in the content, and
        // the second to get us after any setTimeout(foo, 0) in the content.
        gFailureReason = "timed out waiting for test to complete (waiting for onload scripts to complete)";
        LogInfo("OnDocumentLoad triggering AfterOnLoadScripts");
        setTimeout(function () { setTimeout(AfterOnLoadScripts, 0); }, 0);
    }
}
+
// Verify layerization assertions declared in the test markup:
//  - every reftest-opaque-layer element is part of an opaque layer;
//  - elements sharing a reftest-assigned-layer name share one PaintedLayer;
//  - elements with different names are on different PaintedLayers.
// Failures are reported to the harness via the SendFailed* messages.
function CheckLayerAssertions(contentRootElement)
{
    if (!contentRootElement) {
        return;
    }

    var opaqueLayerElements = getOpaqueLayerElements(contentRootElement);
    for (var i = 0; i < opaqueLayerElements.length; ++i) {
        var elem = opaqueLayerElements[i];
        try {
            if (!windowUtils().isPartOfOpaqueLayer(elem)) {
                SendFailedOpaqueLayer(elementDescription(elem) + ' is not part of an opaque layer');
            }
        } catch (e) {
            SendFailedOpaqueLayer('got an exception while checking whether ' + elementDescription(elem) + ' is part of an opaque layer');
        }
    }
    var layerNameToElementsMap = getAssignedLayerMap(contentRootElement);
    var oneOfEach = [];
    // Check that elements with the same reftest-assigned-layer share the same PaintedLayer.
    // NB: the catch blocks below read |elements| declared inside the try;
    // this works because var declarations are hoisted to function scope.
    for (var layerName in layerNameToElementsMap) {
        try {
            var elements = layerNameToElementsMap[layerName];
            oneOfEach.push(elements[0]);
            var numberOfLayers = windowUtils().numberOfAssignedPaintedLayers(elements, elements.length);
            if (numberOfLayers !== 1) {
                SendFailedAssignedLayer('these elements are assigned to ' + numberOfLayers +
                                        ' different layers, instead of sharing just one layer: ' +
                                        elements.map(elementDescription).join(', '));
            }
        } catch (e) {
            SendFailedAssignedLayer('got an exception while checking whether these elements share a layer: ' +
                                    elements.map(elementDescription).join(', '));
        }
    }
    // Check that elements with different reftest-assigned-layer are assigned to different PaintedLayers.
    if (oneOfEach.length > 0) {
        try {
            var numberOfLayers = windowUtils().numberOfAssignedPaintedLayers(oneOfEach, oneOfEach.length);
            if (numberOfLayers !== oneOfEach.length) {
                SendFailedAssignedLayer('these elements are assigned to ' + numberOfLayers +
                                        ' different layers, instead of having none in common (expected ' +
                                        oneOfEach.length + ' different layers): ' +
                                        oneOfEach.map(elementDescription).join(', '));
            }
        } catch (e) {
            SendFailedAssignedLayer('got an exception while checking whether these elements are assigned to different layers: ' +
                                    oneOfEach.map(elementDescription).join(', '));
        }
    }
}
+
// If the test's root element is tagged reftest-expect-process-crash, tell the
// harness so an intentional content-process crash isn't treated as a failure.
//
// Fix: WaitForTestEnd's completion path invokes this with no argument, which
// previously made the expectation silently evaluate against |undefined| and
// never fire.  Fall back to the current document's root element when no
// argument is supplied; passing an explicit element behaves as before.
function CheckForProcessCrashExpectation(contentRootElement)
{
    if (!contentRootElement && content.document) {
        contentRootElement = content.document.documentElement;
    }
    if (contentRootElement &&
        contentRootElement.hasAttribute('class') &&
        contentRootElement.getAttribute('class').split(/\s+/)
                          .indexOf("reftest-expect-process-crash") != -1) {
        SendExpectProcessCrash();
    }
}
+
// Finish the current test: stop the failure timer, collect script-test
// results (TYPE_SCRIPT) or apply last-chance async scroll/zoom offsets, and
// report completion to the harness.
function RecordResult()
{
    LogInfo("RecordResult fired");

    var currentTestRunTime = Date.now() - gCurrentTestStartTime;

    clearTimeout(gFailureTimeout);
    gFailureReason = null;
    gFailureTimeout = null;

    if (gCurrentTestType == TYPE_SCRIPT) {
        var error = '';
        var testwindow = content;

        // Unwrap so we can see the test page's own getTestCases().
        if (testwindow.wrappedJSObject)
            testwindow = testwindow.wrappedJSObject;

        var testcases;
        if (!testwindow.getTestCases || typeof testwindow.getTestCases != "function") {
            // Force an unexpected failure to alert the test author to fix the test.
            error = "test must provide a function getTestCases(). (SCRIPT)\n";
        }
        else if (!(testcases = testwindow.getTestCases())) {
            // Force an unexpected failure to alert the test author to fix the test.
            error = "test's getTestCases() must return an Array-like Object. (SCRIPT)\n";
        }
        else if (testcases.length == 0) {
            // This failure may be due to a JavaScript Engine bug causing
            // early termination of the test. If we do not allow silent
            // failure, the driver will report an error.
        }

        var results = [ ];
        if (!error) {
            // FIXME/bug 618176: temporary workaround
            for (var i = 0; i < testcases.length; ++i) {
                var test = testcases[i];
                results.push({ passed: test.testPassed(),
                               description: test.testDescription() });
            }
            //results = testcases.map(function(test) {
            //  return { passed: test.testPassed(),
            //          description: test.testDescription() };
        }

        SendScriptResults(currentTestRunTime, error, results);
        FinishTestItem();
        return;
    }

    // Setup async scroll offsets now in case SynchronizeForSnapshot is not
    // called (due to reftest-no-sync-layers being supplied, or in the single
    // process case).
    var changedAsyncScrollZoom = false;
    if (setupAsyncScrollOffsets({allowFailure:true})) {
        changedAsyncScrollZoom = true;
    }
    if (setupAsyncZoom({allowFailure:true})) {
        changedAsyncScrollZoom = true;
    }
    // In-process browser: the canvas must be refreshed to pick up the newly
    // applied async transforms.
    if (changedAsyncScrollZoom && !gBrowserIsRemote) {
        sendAsyncMessage("reftest:UpdateWholeCanvasForInvalidation");
    }

    SendTestDone(currentTestRunTime);
    FinishTestItem();
}
+
// Failure-timeout handler: tear down listeners via the registered timeout
// hook and report gFailureReason to the harness.
function LoadFailed()
{
    if (gTimeoutHook) {
        gTimeoutHook();
    }
    gFailureTimeout = null;
    SendFailedLoad(gFailureReason);
}
+
// Invalidate the canvas snapshot; the next test must take a fresh one.
function FinishTestItem()
{
    gHaveCanvasSnapshot = false;
}
+
// Report how many NS_ASSERTIONs fired since the last check.  Only debug
// builds track assertions; on release builds zero is always reported.
function DoAssertionCheck()
{
    gClearingForAssertionCheck = false;

    var newAssertions = 0;
    if (gDebug.isDebugBuild) {
        var totalSoFar = gDebug.assertionCount;
        newAssertions = totalSoFar - gAssertionCount;
        gAssertionCount = totalSoFar;
    }
    SendAssertionCount(newAssertions);
}
+
// Navigate the browsing context to |uri| with a plain load (no special
// cache/referrer/postdata handling).
function LoadURI(uri)
{
    var loadFlags = webNavigation().LOAD_FLAGS_NONE;
    webNavigation().loadURI(uri, loadFlags, null, null, null);
}
+
// Forward a warning-level log line to the harness.  In verbose mode the
// message is sent synchronously so its ordering matches the harness log.
function LogWarning(str)
{
    var send = gVerbose ? sendSyncMessage : sendAsyncMessage;
    send("reftest:Log", { type: "warning", msg: str });
}
+
// Forward an info-level log line to the harness.  In verbose mode the
// message is sent synchronously so its ordering matches the harness log.
function LogInfo(str)
{
    var send = gVerbose ? sendSyncMessage : sendAsyncMessage;
    send("reftest:Log", { type: "info", msg: str });
}
+
// Flags for SynchronizeForSnapshot: SYNC_ALLOW_DISABLE lets a test opt out
// via the reftest-no-sync-layers attribute.
const SYNC_DEFAULT = 0x0;
const SYNC_ALLOW_DISABLE = 0x1;
// Bring the compositor's layer tree up to date before a snapshot is taken,
// then apply any test-requested async scroll offsets / zoom (which require
// the AsyncPanZoomControllers created by the layer update).
function SynchronizeForSnapshot(flags)
{
    if (gCurrentTestType == TYPE_SCRIPT ||
        gCurrentTestType == TYPE_LOAD) {
        // Script tests or load-only tests do not need any snapshotting
        return;
    }

    if (flags & SYNC_ALLOW_DISABLE) {
        var docElt = content.document.documentElement;
        if (docElt && docElt.hasAttribute("reftest-no-sync-layers")) {
            LogInfo("Test file chose to skip SynchronizeForSnapshot");
            return;
        }
    }

    windowUtils().updateLayerTree();

    // Setup async scroll offsets now, because any scrollable layers should
    // have had their AsyncPanZoomControllers created.
    setupAsyncScrollOffsets({allowFailure:false});
    setupAsyncZoom({allowFailure:false});
}
+
// Wire up the control messages the chrome-side harness sends to this frame
// script.
function RegisterMessageListeners()
{
    var handlers = {
        "reftest:Clear":
            function (m) { RecvClear(); },
        "reftest:LoadScriptTest":
            function (m) { RecvLoadScriptTest(m.json.uri, m.json.timeout); },
        "reftest:LoadTest":
            function (m) { RecvLoadTest(m.json.type, m.json.uri, m.json.timeout); },
        "reftest:ResetRenderingState":
            function (m) { RecvResetRenderingState(); }
    };
    for (var name in handlers) {
        addMessageListener(name, handlers[name]);
    }
}
+
// Navigate to the blank page; when its load completes, OnDocumentLoad runs
// the assertion-count check (see gClearingForAssertionCheck).
function RecvClear()
{
    gClearingForAssertionCheck = true;
    LoadURI(BLANK_URL_FOR_CLEARING);
}
+
// Start loading an ordinary reftest document of the given type.
function RecvLoadTest(type, uri, timeout)
{
    StartTestURI(type, uri, timeout);
}
+
// Start loading a script test (a page that reports results via getTestCases()).
function RecvLoadScriptTest(uri, timeout)
{
    StartTestURI(TYPE_SCRIPT, uri, timeout);
}
+
// Undo per-test rendering tweaks (zoom, displayport/viewport) between tests.
function RecvResetRenderingState()
{
    resetZoom();
    resetDisplayportAndViewport();
}
+
// Report the number of NS_ASSERTIONs hit during the last test to the harness.
function SendAssertionCount(numAssertions)
{
    sendAsyncMessage("reftest:AssertionCount", { count: numAssertions });
}
+
// Report graphics info to the harness and synchronously wait for its reply;
// the harness needs the gfx details before scheduling the first test.
//
// Fix: the original computed |gfxInfo| with a guarded `in CC` check (so it
// can be |false| where nsIGfxInfo is unavailable) but then called
// gfxInfo.getInfo() unconditionally, throwing a TypeError on such platforms.
// Guard the use to match the guarded lookup and fall back to an empty info
// object.
function SendContentReady()
{
    let gfxInfo = (NS_GFXINFO_CONTRACTID in CC) &&
                  CC[NS_GFXINFO_CONTRACTID].getService(CI.nsIGfxInfo);
    let info = {};
    if (gfxInfo) {
        info = gfxInfo.getInfo();
        try {
            info.D2DEnabled = gfxInfo.D2DEnabled;
            info.DWriteEnabled = gfxInfo.DWriteEnabled;
        } catch (e) {
            // The Direct2D/DirectWrite getters only exist on Windows.
            info.D2DEnabled = false;
            info.DWriteEnabled = false;
        }
    }

    return sendSyncMessage("reftest:ContentReady", { 'gfx': info })[0];
}
+
// Report an unexpected exception in the frame script to the harness.
function SendException(what)
{
    sendAsyncMessage("reftest:Exception", { what: what });
}
+
// Report that the test document failed to load (or timed out) and why.
function SendFailedLoad(why)
{
    sendAsyncMessage("reftest:FailedLoad", { why: why });
}
+
// Report that a reftest-no-paint element was painted during the test.
function SendFailedNoPaint()
{
    sendAsyncMessage("reftest:FailedNoPaint");
}
+
// Report a reftest-opaque-layer assertion failure and why.
function SendFailedOpaqueLayer(why)
{
    sendAsyncMessage("reftest:FailedOpaqueLayer", { why: why });
}
+
// Report a reftest-assigned-layer assertion failure and why.
function SendFailedAssignedLayer(why)
{
    sendAsyncMessage("reftest:FailedAssignedLayer", { why: why });
}
+
// Ask the chrome side to (re)initialize its canvas with a snapshot of the
// current content.  Return true if a snapshot was taken.
function SendInitCanvasWithSnapshot()
{
    // If we're in the same process as the top-level XUL window, then
    // drawing that window will also update our layers, so no
    // synchronization is needed.
    //
    // NB: this is a test-harness optimization only, it must not
    // affect the validity of the tests.
    if (gBrowserIsRemote) {
        SynchronizeForSnapshot(SYNC_DEFAULT);
    }

    // For in-process browser, we have to make a synchronous request
    // here to make the above optimization valid, so that MozWaitPaint
    // events dispatched (synchronously) during painting are received
    // before we check the paint-wait counter. For out-of-process
    // browser though, it doesn't matter w.r.t. correctness whether this
    // request is sync or async.
    var ret = sendSyncMessage("reftest:InitCanvasWithSnapshot")[0];

    // Remember whether a snapshot exists; shouldWaitForPendingPaints keys
    // off this.
    gHaveCanvasSnapshot = ret.painted;
    return ret.painted;
}
+
// Report a script test's per-case results (and any harness-level error) to
// the chrome side.
function SendScriptResults(runtimeMs, error, results)
{
    sendAsyncMessage("reftest:ScriptResults",
                     { runtimeMs: runtimeMs, error: error, results: results });
}
+
// Tell the harness a content-process crash is expected for this test.
// Fix: dropped the declared |runtimeMs| parameter — it was never used and no
// caller passes an argument.
function SendExpectProcessCrash()
{
    sendAsyncMessage("reftest:ExpectProcessCrash");
}
+
// Report successful completion of the current test and its wall-clock runtime.
function SendTestDone(runtimeMs)
{
    sendAsyncMessage("reftest:TestDone", { runtimeMs: runtimeMs });
}
+
// Snap |x| to the nearest integer multiple of |fraction|.
function roundTo(x, fraction)
{
    var steps = Math.round(x / fraction);
    return steps * fraction;
}
+
// Human-readable open-tag rendering of |element| including all attributes,
// e.g. '<div id="a" class="b">'.  Used in failure messages.
function elementDescription(element)
{
    var attrText = "";
    for (var i = 0; i < element.attributes.length; ++i) {
        var attr = element.attributes[i];
        attrText += ' ' + attr.nodeName + '="' + attr.value + '"';
    }
    return '<' + element.localName + attrText + '>';
}
+
// After a MozAfterPaint, tell the chrome side which canvas regions to
// refresh: either the whole page (reftest-snapshot-all) or the event's
// invalidated rects scaled to device pixels.
function SendUpdateCanvasForEvent(event, contentRootElement)
{
    var win = content;
    var scale = markupDocumentViewer().fullZoom;

    var rects = [ ];
    if (shouldSnapshotWholePage(contentRootElement)) {
        // See comments in SendInitCanvasWithSnapshot() re: the split
        // logic here.
        if (!gBrowserIsRemote) {
            sendSyncMessage("reftest:UpdateWholeCanvasForInvalidation");
        } else {
            SynchronizeForSnapshot(SYNC_ALLOW_DISABLE);
            sendAsyncMessage("reftest:UpdateWholeCanvasForInvalidation");
        }
        return;
    }

    var rectList = event.clientRects;
    LogInfo("SendUpdateCanvasForEvent with " + rectList.length + " rects");
    for (var i = 0; i < rectList.length; ++i) {
        var r = rectList[i];
        // Set left/top/right/bottom to "device pixel" boundaries.
        // roundTo(..., 0.001) absorbs tiny floating-point error before the
        // floor/ceil expands each rect outward to whole pixels.
        var left = Math.floor(roundTo(r.left*scale, 0.001));
        var top = Math.floor(roundTo(r.top*scale, 0.001));
        var right = Math.ceil(roundTo(r.right*scale, 0.001));
        var bottom = Math.ceil(roundTo(r.bottom*scale, 0.001));
        LogInfo("Rect: " + left + " " + top + " " + right + " " + bottom);

        rects.push({ left: left, top: top, right: right, bottom: bottom });
    }

    // See comments in SendInitCanvasWithSnapshot() re: the split
    // logic here.
    if (!gBrowserIsRemote) {
        sendSyncMessage("reftest:UpdateCanvasForInvalidation", { rects: rects });
    } else {
        SynchronizeForSnapshot(SYNC_ALLOW_DISABLE);
        sendAsyncMessage("reftest:UpdateCanvasForInvalidation", { rects: rects });
    }
}
// Script entry point (preprocessed: B2G has no chrome load event to wait on).
#if REFTEST_B2G
OnInitialLoad();
#else
if (content.document.readyState == "complete") {
    // load event has already fired for content, get started
    OnInitialLoad();
} else {
    addEventListener("load", OnInitialLoad, true);
}
#endif
diff --git a/layout/tools/reftest/reftest-preferences.js b/layout/tools/reftest/reftest-preferences.js
new file mode 100644
index 000000000..7922c24d5
--- /dev/null
+++ b/layout/tools/reftest/reftest-preferences.js
@@ -0,0 +1,127 @@
+// For mochitests, we're more interested in testing the behavior of in-
+// content XBL bindings, so we set this pref to true. In reftests, we're
+// more interested in testing the behavior of XBL as it works in chrome,
+// so we want this pref to be false.
+user_pref("dom.use_xbl_scopes_for_remote_xul", false);
+user_pref("gfx.color_management.mode", 2);
+user_pref("gfx.color_management.force_srgb", true);
+user_pref("gfx.logging.level", 1);
+user_pref("browser.dom.window.dump.enabled", true);
+user_pref("ui.caretBlinkTime", -1);
+user_pref("dom.send_after_paint_to_content", true);
+// no slow script dialogs
+user_pref("dom.max_script_run_time", 0);
+user_pref("dom.max_chrome_script_run_time", 0);
+user_pref("hangmonitor.timeout", 0);
+// Ensure autoplay is enabled for all platforms.
+user_pref("media.autoplay.enabled", true);
+// Disable updates
+user_pref("app.update.enabled", false);
+user_pref("app.update.staging.enabled", false);
+user_pref("app.update.url.android", "");
+// Disable addon updates and prefetching so we don't leak them
+user_pref("extensions.update.enabled", false);
+user_pref("extensions.systemAddon.update.url", "http://localhost/dummy-system-addons.xml");
+user_pref("extensions.getAddons.cache.enabled", false);
+// Disable blocklist updates so we don't have them reported as leaks
+user_pref("extensions.blocklist.enabled", false);
+// Make url-classifier updates so rare that they won't affect tests
+user_pref("urlclassifier.updateinterval", 172800);
+// Disable downscale-during-decode, since it makes reftests more difficult.
+user_pref("image.downscale-during-decode.enabled", false);
+// Checking whether two files are the same is slow on Windows.
+// Setting this pref makes tests run much faster there.
+user_pref("security.fileuri.strict_origin_policy", false);
+// Disable the thumbnailing service
+user_pref("browser.pagethumbnails.capturing_disabled", true);
+// Since our tests are 800px wide, set the assume-designed-for width of all
+// pages to be 800px (instead of the default of 980px). This ensures that
+// in our 800px window we don't zoom out by default to try to fit the
+// assumed 980px content.
+user_pref("browser.viewport.desktopWidth", 800);
+// Disable the fade out (over time) of overlay scrollbars, since we
+// can't guarantee taking both reftest snapshots at the same point
+// during the fade.
+user_pref("layout.testing.overlay-scrollbars.always-visible", true);
+// Disable interruptible reflow since (1) it's normally not going to
+// happen, but (2) it might happen if we somehow end up with both
+// pending user events and clock skew. So to avoid having to change
+// MakeProgress to deal with waiting for interruptible reflows to
+// complete for a rare edge case, we just disable interruptible
+// reflow so that that rare edge case doesn't lead to reftest
+// failures.
+user_pref("layout.interruptible-reflow.enabled", false);
+
+// Tell the search service we are running in the US. This also has the
+// desired side-effect of preventing our geoip lookup.
+user_pref("browser.search.isUS", true);
+user_pref("browser.search.countryCode", "US");
+user_pref("browser.search.geoSpecificDefaults", false);
+
+// Make sure SelfSupport doesn't hit the network.
+user_pref("browser.selfsupport.url", "https://localhost/selfsupport-dummy/");
+
+// use about:blank, not browser.startup.homepage
+user_pref("browser.startup.page", 0);
+
+// Allow XUL and XBL files to be opened from file:// URIs
+user_pref("dom.allow_XUL_XBL_for_file", true);
+
+// Allow view-source URIs to be opened from URIs that share
+// their protocol with the inner URI of the view-source URI
+user_pref("security.view-source.reachable-from-inner-protocol", true);
+
+// Ensure that telemetry is disabled, so we don't connect to the telemetry
+// server in the middle of the tests.
+user_pref("toolkit.telemetry.enabled", false);
+user_pref("toolkit.telemetry.unified", false);
+// Likewise for safebrowsing.
+user_pref("browser.safebrowsing.phishing.enabled", false);
+user_pref("browser.safebrowsing.malware.enabled", false);
+user_pref("browser.safebrowsing.forbiddenURIs.enabled", false);
+user_pref("browser.safebrowsing.blockedURIs.enabled", false);
+// Likewise for tracking protection.
+user_pref("privacy.trackingprotection.enabled", false);
+user_pref("privacy.trackingprotection.pbmode.enabled", false);
+// And for snippets.
+user_pref("browser.snippets.enabled", false);
+user_pref("browser.snippets.syncPromo.enabled", false);
+user_pref("browser.snippets.firstrunHomepage.enabled", false);
+// And for useragent updates.
+user_pref("general.useragent.updates.enabled", false);
+// And for webapp updates. Yes, it is supposed to be an integer.
+user_pref("browser.webapps.checkForUpdates", 0);
+// And for about:newtab content fetch and pings.
+user_pref("browser.newtabpage.directory.source", "data:application/json,{\"reftest\":1}");
+user_pref("browser.newtabpage.directory.ping", "");
+// Only allow add-ons from the profile and app and allow foreign
+// injection
+user_pref("extensions.enabledScopes", 5);
+user_pref("extensions.autoDisableScopes", 0);
+// Allow unsigned add-ons
+user_pref("xpinstall.signatures.required", false);
+
+// Don't use auto-enabled e10s
+user_pref("browser.tabs.remote.autostart.1", false);
+user_pref("browser.tabs.remote.autostart.2", false);
+
+user_pref("startup.homepage_welcome_url", "");
+user_pref("startup.homepage_welcome_url.additional", "");
+user_pref("startup.homepage_override_url", "");
+
+user_pref("media.gmp-manager.url.override", "http://localhost/dummy-gmp-manager.xml");
+user_pref("media.gmp-manager.updateEnabled", false);
+
// A fake bool pref for "@supports -moz-bool-pref" sanity test.
+user_pref("testing.supports.moz-bool-pref", true);
+
+// Reftests load a lot of URLs very quickly. This puts avoidable and
+// unnecessary I/O pressure on the Places DB (measured to be in the
+// gigabytes).
+user_pref("places.history.enabled", false);
+
+// For Firefox 52 only, ESR will support non-Flash plugins while release will
+// not, so we keep testing the non-Flash pathways
+user_pref("plugin.load_flash_only", false);
+
+user_pref("media.openUnsupportedTypeWithExternalApp", false);
diff --git a/layout/tools/reftest/reftest-to-html.pl b/layout/tools/reftest/reftest-to-html.pl
new file mode 100755
index 000000000..3fc2380e9
--- /dev/null
+++ b/layout/tools/reftest/reftest-to-html.pl
@@ -0,0 +1,118 @@
+#!/usr/bin/perl
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
# Emit the static HTML header. Each checkbox toggles the display of one
# result class by flipping the corresponding stylesheet rule, so the
# rules below must stay in the order the inline onclick handlers index.
print <<EOD
<html>
<head>
<title>reftest output</title>
<style type="text/css">
/* must be in this order */
.PASS { background-color: green; }
.FAIL { background-color: red; }
.XFAIL { background-color: #999300; }
.WEIRDPASS { background-color: #00FFED; }
.PASSRANDOM { background-color: #598930; }
.FAILRANDOM, td.XFAILRANDOM { background-color: #99402A; }

.FAILIMAGES { }
img { margin: 5px; width: 80px; height: 100px; }
img.testresult { border: 2px solid red; }
img.testref { border: 2px solid green; }
a { color: inherit; }
.always { display: inline ! important; }
</style>
</head>
<body>
<p>
<span class="PASS always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[0].style; if (s.display == 'none') s.display = null; else s.display = 'none';">PASS</span>&nbsp;
<span class="FAIL always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[1].style; if (s.display == 'none') s.display = null; else s.display = 'none';">UNEXPECTED FAIL</span>&nbsp;
<span class="XFAIL always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[2].style; if (s.display == 'none') s.display = null; else s.display = 'none';">KNOWN FAIL</span>&nbsp;
<span class="WEIRDPASS always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[3].style; if (s.display == 'none') s.display = null; else s.display = 'none';">UNEXPECTED PASS</span>&nbsp;
<span class="PASSRANDOM always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[4].style; if (s.display == 'none') s.display = null; else s.display = 'none';">PASS (Random)</span>&nbsp;
<span class="FAILRANDOM always"><input type="checkbox" checked="true" onclick="var s = document.styleSheets[0].cssRules[5].style; if (s.display == 'none') s.display = null; else s.display = 'none';">FAIL (Random)</span>&nbsp;
</p>
<table>
EOD
;
+
# Read one line from the input, stripping the newline and any trailing
# carriage return (DOS line endings).
sub readcleanline {
    my $line = <>;
    chomp $line;
    $line =~ s/\r$//;
    return $line;
}
+
# Turn bare file: and data: URIs in a log line into HTML links.
# Fix: use $1 in the replacement text — \1 there is a deprecated synonym
# that warns under -w (see perlre).
sub do_html {
    my ($l) = @_;

    $l =~ s,(file:[^ ]*),<a href="$1">$1</a>,g;
    $l =~ s,(data:[^ ]*),<a href="$1">$1</a>,g;

    return $l;
}
+
# Line counter, used only for error messages.
$l = 0;

# Main loop: translate each REFTEST log line into one HTML table row.
while (<>) {
    $l++;
    next unless /^REFTEST/;

    chomp;
    chop if /\r$/;

    s/^REFTEST *//;

    # "(EXPECTED RANDOM)" annotations select the *RANDOM css classes.
    my $randomresult = 0;
    if (/EXPECTED RANDOM/) {
        s/\(EXPECTED RANDOM\)//;
        $randomresult = 1;
    }

    if (/^TEST-PASS \| (.*)$/) {
        my $class = $randomresult ? "PASSRANDOM" : "PASS";
        print '<tr><td class="' . $class . '">' . do_html($1) . "</td></tr>\n";
    } elsif (/^TEST-UNEXPECTED-(....) \| (.*)$/) {
        if ($randomresult) {
            die "Error on line $l: UNEXPECTED with test marked random?!";
        }
        my $class = ($1 eq "PASS") ? "WEIRDPASS" : "FAIL";
        print '<tr><td class="' . $class . '">' . do_html($2) . "</td></tr>\n";

        # UNEXPECTED results can be followed by one or two images
        $testline = &readcleanline;

        print '<tr><td class="FAILIMAGES">';

        if ($testline =~ /REFTEST IMAGE: (data:.*)$/) {
            # Single snapshot (e.g. != failure).
            print '<a href="' . $1 . '"><img class="testresult" src="' . $1 . '"></a>';
        } elsif ($testline =~ /REFTEST IMAGE 1 \(TEST\): (data:.*)$/) {
            # Pair of snapshots: the test image, then the reference image
            # on the following line.
            $refline = &readcleanline;
            print '<a href="' . $1 . '"><img class="testresult" src="' . $1 . '"></a>';
            # NOTE(review): this bare block has no effect on scoping or
            # control flow — it appears to be a leftover grouping.
            {
                die "Error on line $l" unless $refline =~ /REFTEST IMAGE 2 \(REFERENCE\): (data:.*)$/;
                print '<a href="' . $1 . '"><img class="testref" src="' . $1 . '"></a>';
            }

        } else {
            die "Error on line $l";
        }

        print "</td></tr>\n";
    } elsif (/^TEST-KNOWN-FAIL \| (.*$)/) {
        my $class = $randomresult ? "XFAILRANDOM" : "XFAIL";
        print '<tr><td class="' . $class . '">' . do_html($1) . "</td></tr>\n";
    } else {
        # Unrecognized REFTEST lines are echoed verbatim so no output is lost.
        print STDERR "Unknown Line: " . $_ . "\n";
        print "<tr><td><pre>" . $_ . "</pre></td></tr>\n";
    }
}
+
# Close the table and the document.
print <<EOD
</table>
</body>
</html>
EOD
;
diff --git a/layout/tools/reftest/reftest.jsm b/layout/tools/reftest/reftest.jsm
new file mode 100644
index 000000000..69d804453
--- /dev/null
+++ b/layout/tools/reftest/reftest.jsm
@@ -0,0 +1,2112 @@
/* -*- indent-tabs-mode: nil; js-indent-level: 4 -*- */
/* vim: set shiftwidth=4 tabstop=8 autoindent cindent expandtab: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
this.EXPORTED_SYMBOLS = ["OnRefTestLoad", "OnRefTestUnload"];

// Shorthands for the XPCOM globals. All four are constants; CC was
// previously declared with `var`, made `const` for consistency with its
// siblings.
const CC = Components.classes;
const CI = Components.interfaces;
const CR = Components.results;
const CU = Components.utils;

const XHTML_NS = "http://www.w3.org/1999/xhtml";
const XUL_NS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";

// XPCOM contract IDs used throughout this module.
const NS_LOCAL_FILE_CONTRACTID = "@mozilla.org/file/local;1";
const NS_GFXINFO_CONTRACTID = "@mozilla.org/gfx/info;1";
const IO_SERVICE_CONTRACTID = "@mozilla.org/network/io-service;1";
const DEBUG_CONTRACTID = "@mozilla.org/xpcom/debug;1";
const NS_LOCALFILEINPUTSTREAM_CONTRACTID =
          "@mozilla.org/network/file-input-stream;1";
const NS_SCRIPTSECURITYMANAGER_CONTRACTID =
          "@mozilla.org/scriptsecuritymanager;1";
const NS_REFTESTHELPER_CONTRACTID =
          "@mozilla.org/reftest-helper;1";
const NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX =
          "@mozilla.org/network/protocol;1?name=";
const NS_XREAPPINFO_CONTRACTID =
          "@mozilla.org/xre/app-info;1";
const NS_DIRECTORY_SERVICE_CONTRACTID =
          "@mozilla.org/file/directory_service;1";
const NS_OBSERVER_SERVICE_CONTRACTID =
          "@mozilla.org/observer-service;1";

CU.import("resource://gre/modules/FileUtils.jsm");
CU.import("chrome://reftest/content/httpd.jsm", this);
CU.import("chrome://reftest/content/StructuredLog.jsm", this);
CU.import("resource://gre/modules/Services.jsm");
CU.import("resource://gre/modules/NetUtil.jsm");
+
+var gLoadTimeout = 0;
+var gTimeoutHook = null;
+var gRemote = false;
+var gIgnoreWindowSize = false;
+var gShuffle = false;
+var gRepeat = null;
+var gRunUntilFailure = false;
+var gTotalChunks = 0;
+var gThisChunk = 0;
+var gContainingWindow = null;
+var gURLFilterRegex = {};
+var gContentGfxInfo = null;
+const FOCUS_FILTER_ALL_TESTS = "all";
+const FOCUS_FILTER_NEEDS_FOCUS_TESTS = "needs-focus";
+const FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS = "non-needs-focus";
+var gFocusFilterMode = FOCUS_FILTER_ALL_TESTS;
+
+// "<!--CLEAR-->"
+const BLANK_URL_FOR_CLEARING = "data:text/html;charset=UTF-8,%3C%21%2D%2DCLEAR%2D%2D%3E";
+
+var gBrowser;
+// Are we testing web content loaded in a separate process?
+var gBrowserIsRemote; // bool
+var gB2GisMulet; // bool
+// Are we using <iframe mozbrowser>?
+var gBrowserIsIframe; // bool
+var gBrowserMessageManager;
+var gCanvas1, gCanvas2;
+// gCurrentCanvas is non-null between InitCurrentCanvasWithSnapshot and the next
+// RecordResult.
+var gCurrentCanvas = null;
+var gURLs;
+var gManifestsLoaded = {};
+// Map from URI spec to the number of times it remains to be used
+var gURIUseCounts;
+// Map from URI spec to the canvas rendered for that URI
+var gURICanvases;
+var gTestResults = {
+ // Successful...
+ Pass: 0,
+ LoadOnly: 0,
+ // Unexpected...
+ Exception: 0,
+ FailedLoad: 0,
+ UnexpectedFail: 0,
+ UnexpectedPass: 0,
+ AssertionUnexpected: 0,
+ AssertionUnexpectedFixed: 0,
+ // Known problems...
+ KnownFail : 0,
+ AssertionKnown: 0,
+ Random : 0,
+ Skip: 0,
+ Slow: 0,
+};
+var gTotalTests = 0;
+var gState;
+var gCurrentURL;
+var gTestLog = [];
+var gLogLevel;
+var gServer;
+var gCount = 0;
+var gAssertionCount = 0;
+
+var gIOService;
+var gDebug;
+var gWindowUtils;
+
+var gSlowestTestTime = 0;
+var gSlowestTestURL;
+var gFailedUseWidgetLayers = false;
+
+var gDrawWindowFlags;
+
+var gExpectingProcessCrash = false;
+var gExpectedCrashDumpFiles = [];
+var gUnexpectedCrashDumpFiles = { };
+var gCrashDumpDir;
+var gFailedNoPaint = false;
+var gFailedOpaqueLayer = false;
+var gFailedOpaqueLayerMessages = [];
+var gFailedAssignedLayer = false;
+var gFailedAssignedLayerMessages = [];
+
+// The enabled-state of the test-plugins, stored so they can be reset later
+var gTestPluginEnabledStates = null;
+
+const TYPE_REFTEST_EQUAL = '==';
+const TYPE_REFTEST_NOTEQUAL = '!=';
+const TYPE_LOAD = 'load'; // test without a reference (just test that it does
+ // not assert, crash, hang, or leak)
+const TYPE_SCRIPT = 'script'; // test contains individual test results
+
+// The order of these constants matters, since when we have a status
+// listed for a *manifest*, we combine the status with the status for
+// the test by using the *larger*.
+// FIXME: In the future, we may also want to use this rule for combining
+// statuses that are on the same line (rather than making the last one
+// win).
+const EXPECTED_PASS = 0;
+const EXPECTED_FAIL = 1;
+const EXPECTED_RANDOM = 2;
+const EXPECTED_DEATH = 3; // test must be skipped to avoid e.g. crash/hang
+const EXPECTED_FUZZY = 4;
+
+// types of preference value we might want to set for a specific test
+const PREF_BOOLEAN = 0;
+const PREF_STRING = 1;
+const PREF_INTEGER = 2;
+
+var gPrefsToRestore = [];
+
+const gProtocolRE = /^\w+:/;
+const gPrefItemRE = /^(|test-|ref-)pref\((.+?),(.*)\)$/;
+
+var gHttpServerPort = -1;
+
+// whether to run slow tests or not
+var gRunSlowTests = true;
+
+// whether we should skip caching canvases
+var gNoCanvasCache = false;
+
+var gRecycledCanvases = new Array();
+
+// Only dump the sandbox once, because it doesn't depend on the
+// manifest URL (yet!).
+var gDumpedConditionSandbox = false;
+
// True when any of the unexpected-result counters in gTestResults is
// nonzero (exceptions, load failures, unexpected fails/passes, and
// unexpected assertion counts).
function HasUnexpectedResult()
{
    var unexpectedKinds = ["Exception", "FailedLoad", "UnexpectedFail",
                           "UnexpectedPass", "AssertionUnexpected",
                           "AssertionUnexpectedFixed"];
    return unexpectedKinds.some(kind => gTestResults[kind] > 0);
}
+
// By default we just log to stdout
var gLogFile = null;
// Write a line to stdout and, when a log file stream was opened (see the
// reftest.logFile pref handling in InitAndStartRefTests), to that file too.
var gDumpFn = function(line) {
    dump(line);
    if (gLogFile) {
        gLogFile.write(line, line.length);
    }
}
// Sink for the structured logger: one JSON record per line, surrounded by
// newlines so records are easy to pick out of mixed output.
var gDumpRawLog = function(record) {
    // Dump JSON representation of data on a single line
    var line = "\n" + JSON.stringify(record) + "\n";
    dump(line);

    if (gLogFile) {
        gLogFile.write(line, line.length);
    }
}
var logger = new StructuredLogger('reftest', gDumpRawLog);
+
// Buffer a log line for the currently running test; it is also emitted
// immediately at debug level.
function TestBuffer(str)
{
    gTestLog.push(str);
    logger.debug(str);
}
+
// Replay the buffered per-test log lines at info level, then clear the
// buffer. At 'debug' log level every line was already dumped by
// TestBuffer, so replaying is skipped.
function FlushTestBuffer()
{
    if (gLogLevel !== 'debug') {
        for (var entry of gTestLog) {
            logger.info("Saved log: " + entry);
        }
    }
    gTestLog = [];
}
+
// Log why widget layers were disabled, with a hint on how to avoid the
// fallback rendering path.
function LogWidgetLayersFailure()
{
    logger.error("USE_WIDGET_LAYERS disabled because the screen resolution is too low. This falls back to an alternate rendering path (that may not be representative) and is not implemented with e10s enabled.");
    logger.error("Consider increasing your screen resolution, or adding '--disable-e10s' to your './mach reftest' command");
}
+
// Return a canvas sized to the browser element, preferring one from the
// recycle pool (see ReleaseCanvas) over allocating a new one.
function AllocateCanvas()
{
    var recycled = gRecycledCanvases.shift();
    if (recycled !== undefined) {
        return recycled;
    }

    var rect = gBrowser.getBoundingClientRect();
    var canvas = gContainingWindow.document.createElementNS(XHTML_NS, "canvas");
    canvas.setAttribute("width", Math.ceil(rect.width));
    canvas.setAttribute("height", Math.ceil(rect.height));
    return canvas;
}
+
// Return a canvas to the recycle pool for AllocateCanvas to reuse.
// When canvas caching is disabled, keep at most two around.
function ReleaseCanvas(canvas)
{
    var underCap = gRecycledCanvases.length < 2;
    if (!gNoCanvasCache || underCap) {
        gRecycledCanvases.push(canvas);
    }
}
+
// Return the event target's id attribute wrapped in single quotes, or
// "<unknown>" when the target has no getAttribute (e.g. a document).
function IDForEventTarget(event)
{
    try {
        var id = event.target.getAttribute('id');
        return "'" + id + "'";
    } catch (ex) {
        return "<unknown>";
    }
}
+
// Look up a plugin tag by name via the plugin host service; returns the
// tag, or null (after logging a warning) when no plugin matches.
function getTestPlugin(aName) {
    var host = CC["@mozilla.org/plugin/host;1"].getService(CI.nsIPluginHost);

    for (var tag of host.getPluginTags()) {
        if (tag.name == aName) {
            return tag;
        }
    }

    logger.warning("Failed to find the test-plugin.");
    return null;
}
+
// Entry point invoked once the chrome window has loaded. Reads harness
// prefs, creates the <browser>/<iframe> element that hosts test pages,
// enables the test plugins, and registers the message listeners that
// drive the content script.
this.OnRefTestLoad = function OnRefTestLoad(win)
{
    // Crash dumps are written to <profile>/minidumps; remembered for the
    // process-crash observers.
    gCrashDumpDir = CC[NS_DIRECTORY_SERVICE_CONTRACTID]
                    .getService(CI.nsIProperties)
                    .get("ProfD", CI.nsIFile);
    gCrashDumpDir.append("minidumps");

    // NOTE(review): 'env' is not used in this function — looks leftover;
    // confirm before removing.
    var env = CC["@mozilla.org/process/environment;1"].
              getService(CI.nsIEnvironment);

    var prefs = Components.classes["@mozilla.org/preferences-service;1"].
                getService(Components.interfaces.nsIPrefBranch);
    // Each optional pref falls back to a default when absent.
    try {
        gBrowserIsRemote = prefs.getBoolPref("browser.tabs.remote.autostart");
    } catch (e) {
        gBrowserIsRemote = false;
    }

    try {
        gB2GisMulet = prefs.getBoolPref("b2g.is_mulet");
    } catch (e) {
        gB2GisMulet = false;
    }

    try {
        gBrowserIsIframe = prefs.getBoolPref("reftest.browser.iframe.enabled");
    } catch (e) {
        gBrowserIsIframe = false;
    }

    try {
        gLogLevel = prefs.getCharPref("reftest.logLevel");
    } catch (e) {
        gLogLevel ='info';
    }

    if (win === undefined || win == null) {
        win = window;
    }
    if (gContainingWindow == null && win != null) {
        gContainingWindow = win;
    }

    // Host the test content either in an <iframe mozbrowser> (B2G-style)
    // or a XUL <browser>.
    if (gBrowserIsIframe) {
        gBrowser = gContainingWindow.document.createElementNS(XHTML_NS, "iframe");
        gBrowser.setAttribute("mozbrowser", "");
        gBrowser.setAttribute("mozapp", prefs.getCharPref("b2g.system_manifest_url"));
    } else {
        gBrowser = gContainingWindow.document.createElementNS(XUL_NS, "xul:browser");
    }
    gBrowser.setAttribute("id", "browser");
    gBrowser.setAttribute("type", "content-primary");
    gBrowser.setAttribute("remote", gBrowserIsRemote ? "true" : "false");
    // Make sure the browser element is exactly 800x1000, no matter
    // what size our window is
    gBrowser.setAttribute("style", "padding: 0px; margin: 0px; border:none; min-width: 800px; min-height: 1000px; max-width: 800px; max-height: 1000px");

    if (Services.appinfo.OS == "Android") {
        let doc;
        if (Services.appinfo.widgetToolkit == "gonk") {
            doc = gContainingWindow.document.getElementsByTagName("html")[0];
        } else {
            doc = gContainingWindow.document.getElementById('main-window');
        }
        // Replace the entire chrome document's content with the browser.
        while (doc.hasChildNodes()) {
            doc.removeChild(doc.firstChild);
        }
        doc.appendChild(gBrowser);
    } else {
        // NOTE(review): bare 'document' is not normally defined in JSM
        // scope — this may rely on the loader sharing the window global;
        // confirm, or use gContainingWindow.document here.
        document.getElementById("reftest-window").appendChild(gBrowser);
    }

    // reftests should have the test plugins enabled, not click-to-play
    let plugin1 = getTestPlugin("Test Plug-in");
    let plugin2 = getTestPlugin("Second Test Plug-in");
    if (plugin1 && plugin2) {
        // Save the current states so OnRefTestUnload can restore them.
        gTestPluginEnabledStates = [plugin1.enabledState, plugin2.enabledState];
        plugin1.enabledState = CI.nsIPluginTag.STATE_ENABLED;
        plugin2.enabledState = CI.nsIPluginTag.STATE_ENABLED;
    } else {
        logger.warning("Could not get test plugin tags.");
    }

    gBrowserMessageManager = gBrowser.QueryInterface(CI.nsIFrameLoaderOwner)
                                     .frameLoader.messageManager;
    // The content script waits for the initial onload, then notifies
    // us.
    RegisterMessageListenersAndLoadContentScript();
}
+
// Read the remaining harness prefs (timeout, log file, remote mode,
// chunking, focus filtering), set up the window utils, HTTP server and
// crash observers, then start running tests.
// Fix: corrected the typo "inteface" -> "interface" in the thrown error.
function InitAndStartRefTests()
{
    /* These prefs are optional, so we don't need to spit an error to the log */
    try {
        // 'var' is function-scoped, so prefs stays visible below the try.
        var prefs = Components.classes["@mozilla.org/preferences-service;1"].
                    getService(Components.interfaces.nsIPrefBranch);
    } catch(e) {
        logger.error("EXCEPTION: " + e);
    }

    try {
        prefs.setBoolPref("android.widget_paints_background", false);
    } catch (e) {}

    /* set the gLoadTimeout */
    try {
        gLoadTimeout = prefs.getIntPref("reftest.timeout");
    } catch(e) {
        gLoadTimeout = 5 * 60 * 1000; //5 minutes as per bug 479518
    }

    /* Get the logfile for android tests */
    try {
        var logFile = prefs.getCharPref("reftest.logFile");
        if (logFile) {
            var f = FileUtils.File(logFile);
            gLogFile = FileUtils.openFileOutputStream(f, FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE);
        }
    } catch(e) {}

    try {
        gRemote = prefs.getBoolPref("reftest.remote");
    } catch(e) {
        gRemote = false;
    }

    try {
        gIgnoreWindowSize = prefs.getBoolPref("reftest.ignoreWindowSize");
    } catch(e) {
        gIgnoreWindowSize = false;
    }

    /* Support for running a chunk (subset) of tests. In separate try as this is optional */
    try {
        gTotalChunks = prefs.getIntPref("reftest.totalChunks");
        gThisChunk = prefs.getIntPref("reftest.thisChunk");
    }
    catch(e) {
        gTotalChunks = 0;
        gThisChunk = 0;
    }

    try {
        gFocusFilterMode = prefs.getCharPref("reftest.focusFilterMode");
    } catch(e) {}

    // compareCanvases is required for snapshot comparison; bail out early
    // when the interface is unavailable.
    gWindowUtils = gContainingWindow.QueryInterface(CI.nsIInterfaceRequestor).getInterface(CI.nsIDOMWindowUtils);
    if (!gWindowUtils || !gWindowUtils.compareCanvases)
        throw "nsIDOMWindowUtils interface missing";

    gIOService = CC[IO_SERVICE_CONTRACTID].getService(CI.nsIIOService);
    gDebug = CC[DEBUG_CONTRACTID].getService(CI.nsIDebug2);

    RegisterProcessCrashObservers();

    // In remote mode an external server is used instead.
    if (gRemote) {
        gServer = null;
    } else {
        gServer = new HttpServer();
    }
    try {
        if (gServer)
            StartHTTPServer();
    } catch (ex) {
        //gBrowser.loadURI('data:text/plain,' + ex);
        ++gTestResults.Exception;
        logger.error("EXCEPTION: " + ex);
        DoneTests();
    }

    // Focus the content browser.
    if (gFocusFilterMode != FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS) {
        gBrowser.focus();
    }

    StartTests();
}
+
// Start the in-process HTTP server created in InitAndStartRefTests.
// .sjs files are served as server-side scripts; start(-1) lets the server
// pick a free port, which is read back into gHttpServerPort.
function StartHTTPServer()
{
    gServer.registerContentType("sjs", "sjs");
    gServer.start(-1);
    gHttpServerPort = gServer.identity.primaryPort;
}
+
// Perform an in-place Fisher-Yates shuffle: walk backwards through the
// array, swapping each slot with a uniformly chosen earlier (or same) slot.
function Shuffle(array)
{
    for (var idx = array.length - 1; idx > 0; idx--) {
        var pick = Math.floor(Math.random() * (idx + 1));
        [array[idx], array[pick]] = [array[pick], array[idx]];
    }
}
+
// Load the manifests named by the reftest.manifests pref, build gURLs
// (applying focus/slow/chunk/shuffle filters) and start the first test.
// Called again for every repetition when reftest.repeat is in effect.
function StartTests()
{
    var manifests;
    /* These prefs are optional, so we don't need to spit an error to the log */
    try {
        // 'var' is function-scoped, so prefs stays visible below the try.
        var prefs = Components.classes["@mozilla.org/preferences-service;1"].
                    getService(Components.interfaces.nsIPrefBranch);
    } catch(e) {
        logger.error("EXCEPTION: " + e);
    }

    // NOTE(review): read with getIntPref but used as a boolean — confirm
    // the pref type matches what the Python harness writes.
    try {
        gNoCanvasCache = prefs.getIntPref("reftest.nocache");
    } catch(e) {
        gNoCanvasCache = false;
    }

    try {
        gShuffle = prefs.getBoolPref("reftest.shuffle");
    } catch (e) {
        gShuffle = false;
    }

    try {
        gRunUntilFailure = prefs.getBoolPref("reftest.runUntilFailure");
    } catch (e) {
        gRunUntilFailure = false;
    }

    // When we repeat this function is called again, so really only want to set
    // gRepeat once.
    if (gRepeat == null) {
        try {
            gRepeat = prefs.getIntPref("reftest.repeat");
        } catch (e) {
            gRepeat = 0;
        }
    }

    // NOTE(review): the pref name ("skipslowtests") and the variable name
    // (gRunSlowTests) disagree, and getIntPref is used for a boolean-ish
    // value — confirm intended semantics before changing.
    try {
        gRunSlowTests = prefs.getIntPref("reftest.skipslowtests");
    } catch(e) {
        gRunSlowTests = false;
    }

    // Shuffled runs disable the canvas cache.
    if (gShuffle) {
        gNoCanvasCache = true;
    }

    gURLs = [];
    gManifestsLoaded = {};

    try {
        // NOTE(review): 'manifests' is re-declared here (harmless — var is
        // function-scoped), and the URL-filter regex is looked up under the
        // key null (coerced to the string "null") — confirm this matches
        // what the Python harness stores in reftest.manifests.
        var manifests = JSON.parse(prefs.getCharPref("reftest.manifests"));
        gURLFilterRegex = manifests[null];
    } catch(e) {
        logger.error("Unable to find reftest.manifests pref. Please ensure your profile is setup properly");
        DoneTests();
    }

    try {
        // The empty-string key carries a filter applied to every manifest.
        var globalFilter = manifests.hasOwnProperty("") ? new RegExp(manifests[""]) : null;
        var manifestURLs = Object.keys(manifests);

        // Ensure we read manifests from higher up the directory tree first so that we
        // process includes before reading the included manifest again
        manifestURLs.sort(function(a,b) {return a.length - b.length})
        manifestURLs.forEach(function(manifestURL) {
            logger.info("Reading manifest " + manifestURL);
            var filter = manifests[manifestURL] ? new RegExp(manifests[manifestURL]) : null;
            ReadTopManifest(manifestURL, [globalFilter, filter, false]);
        });
        BuildUseCounts();

        // Filter tests which will be skipped to get a more even distribution when chunking
        // tURLs is a temporary array containing all active tests
        var tURLs = new Array();
        var tIDs = new Array();
        for (var i = 0; i < gURLs.length; ++i) {
            if (gURLs[i].expected == EXPECTED_DEATH)
                continue;

            if (gURLs[i].needsFocus && !Focus())
                continue;

            if (gURLs[i].slow && !gRunSlowTests)
                continue;

            tURLs.push(gURLs[i]);
            tIDs.push(gURLs[i].identifier);
        }

        logger.suiteStart(tIDs, {"skipped": gURLs.length - tURLs.length});

        if (gTotalChunks > 0 && gThisChunk > 0) {
            // Calculate start and end indices of this chunk if tURLs array were
            // divided evenly
            var testsPerChunk = tURLs.length / gTotalChunks;
            var start = Math.round((gThisChunk-1) * testsPerChunk);
            var end = Math.round(gThisChunk * testsPerChunk);

            // Map these indices onto the gURLs array. This avoids modifying the
            // gURLs array which prevents skipped tests from showing up in the log
            start = gThisChunk == 1 ? 0 : gURLs.indexOf(tURLs[start]);
            // NOTE(review): tURLs[end + 1] can be undefined at some chunk
            // boundaries, making indexOf return -1 — verify the boundary
            // arithmetic for the last-but-one chunk.
            end = gThisChunk == gTotalChunks ? gURLs.length : gURLs.indexOf(tURLs[end + 1]) - 1;
            gURLs = gURLs.slice(start, end);

            logger.info("Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. " +
                "tests " + (start+1) + "-" + end + "/" + gURLs.length);
        }

        if (gShuffle) {
            Shuffle(gURLs);
        }

        gTotalTests = gURLs.length;

        if (!gTotalTests)
            throw "No tests to run";

        gURICanvases = {};
        StartCurrentTest();
    } catch (ex) {
        //gBrowser.loadURI('data:text/plain,' + ex);
        ++gTestResults.Exception;
        logger.error("EXCEPTION: " + ex);
        DoneTests();
    }
}
+
// Restore the test plugins' enabled-states that OnRefTestLoad saved.
// Fix: also check gTestPluginEnabledStates — it stays null when the
// load-time plugin lookup failed, and indexing null here would throw.
function OnRefTestUnload()
{
    let plugin1 = getTestPlugin("Test Plug-in");
    let plugin2 = getTestPlugin("Second Test Plug-in");
    if (plugin1 && plugin2 && gTestPluginEnabledStates) {
        plugin1.enabledState = gTestPluginEnabledStates[0];
        plugin2.enabledState = gTestPluginEnabledStates[1];
    } else {
        logger.warning("Failed to get test plugin tags.");
    }
}
+
// Read all available data from an input stream and return it
// as a string.
function getStreamContent(inputStream)
{
    var scriptable = CC["@mozilla.org/scriptableinputstream;1"].
                     createInstance(CI.nsIScriptableInputStream);
    scriptable.init(inputStream);

    // Drain the stream chunk by chunk until nothing remains.
    var chunks = [];
    for (;;) {
        var avail = scriptable.available();
        if (avail == 0)
            break;
        chunks.push(scriptable.read(avail));
    }

    return chunks.join("");
}
+
// Build the sandbox for fails-if(), etc., condition evaluation.
// Exposes platform, toolkit and graphics facts (d2d, Android, OSX, ...)
// that manifest annotations can test against.
function BuildConditionSandbox(aURL) {
    var sandbox = new Components.utils.Sandbox(aURL.spec);
    var xr = CC[NS_XREAPPINFO_CONTRACTID].getService(CI.nsIXULRuntime);
    // NOTE(review): 'appInfo' and 'testRect' (below) appear unused in this
    // function — confirm before removing.
    var appInfo = CC[NS_XREAPPINFO_CONTRACTID].getService(CI.nsIXULAppInfo);
    sandbox.isDebugBuild = gDebug.isDebugBuild;

    // xr.XPCOMABI throws exception for configurations without full ABI
    // support (mobile builds on ARM)
    var XPCOMABI = "";
    try {
        XPCOMABI = xr.XPCOMABI;
    } catch(e) {}

    sandbox.xulRuntime = CU.cloneInto({widgetToolkit: xr.widgetToolkit, OS: xr.OS, XPCOMABI: XPCOMABI}, sandbox);

    var testRect = gBrowser.getBoundingClientRect();
    sandbox.smallScreen = false;
    if (gContainingWindow.innerWidth < 800 || gContainingWindow.innerHeight < 1000) {
        sandbox.smallScreen = true;
    }

    var gfxInfo = (NS_GFXINFO_CONTRACTID in CC) && CC[NS_GFXINFO_CONTRACTID].getService(CI.nsIGfxInfo);
    // Prefer gfx values forwarded from the content process over the
    // chrome-side gfxInfo object.
    let readGfxInfo = function (obj, key) {
        if (gContentGfxInfo && (key in gContentGfxInfo)) {
            return gContentGfxInfo[key];
        }
        return obj[key];
    }

    try {
        sandbox.d2d = readGfxInfo(gfxInfo, "D2DEnabled");
        sandbox.dwrite = readGfxInfo(gfxInfo, "DWriteEnabled");
    } catch (e) {
        sandbox.d2d = false;
        sandbox.dwrite = false;
    }

    // NOTE(review): if the gfx-info service is unavailable, gfxInfo is
    // false and this call throws — only the d2d/dwrite reads above are
    // guarded. Confirm all supported platforms register the service.
    var info = gfxInfo.getInfo();
    var canvasBackend = readGfxInfo(info, "AzureCanvasBackend");
    var contentBackend = readGfxInfo(info, "AzureContentBackend");
    var canvasAccelerated = readGfxInfo(info, "AzureCanvasAccelerated");

    sandbox.azureCairo = canvasBackend == "cairo";
    sandbox.azureQuartz = canvasBackend == "quartz";
    sandbox.azureSkia = canvasBackend == "skia";
    sandbox.skiaContent = contentBackend == "skia";
    sandbox.azureSkiaGL = canvasAccelerated; // FIXME: assumes GL right now
    // true if we are using the same Azure backend for rendering canvas and content
    sandbox.contentSameGfxBackendAsCanvas = contentBackend == canvasBackend
                                            || (contentBackend == "none" && canvasBackend == "cairo");

    sandbox.layersGPUAccelerated =
      gWindowUtils.layerManagerType != "Basic";
    sandbox.d3d11 =
      gWindowUtils.layerManagerType == "Direct3D 11";
    sandbox.d3d9 =
      gWindowUtils.layerManagerType == "Direct3D 9";
    sandbox.layersOpenGL =
      gWindowUtils.layerManagerType == "OpenGL";
    sandbox.layersOMTC =
      gWindowUtils.layerManagerRemote == true;

    // Shortcuts for widget toolkits.
    sandbox.B2G = xr.widgetToolkit == "gonk";
    sandbox.Android = xr.OS == "Android" && !sandbox.B2G;
    sandbox.cocoaWidget = xr.widgetToolkit == "cocoa";
    sandbox.gtkWidget = xr.widgetToolkit == "gtk2"
                        || xr.widgetToolkit == "gtk3";
    sandbox.qtWidget = xr.widgetToolkit == "qt";
    sandbox.winWidget = xr.widgetToolkit == "windows";

    // Scrollbars that are semi-transparent. See bug 1169666.
    sandbox.transparentScrollbars = xr.widgetToolkit == "gtk3";

    if (sandbox.Android) {
        var sysInfo = CC["@mozilla.org/system-info;1"].getService(CI.nsIPropertyBag2);

        // This is currently used to distinguish Android 4.0.3 (SDK version 15)
        // and later from Android 2.x
        sandbox.AndroidVersion = sysInfo.getPropertyAsInt32("version");
    }

// Build-time flags (this file is preprocessed).
#if MOZ_ASAN
    sandbox.AddressSanitizer = true;
#else
    sandbox.AddressSanitizer = false;
#endif

#if MOZ_WEBRTC
    sandbox.webrtc = true;
#else
    sandbox.webrtc = false;
#endif

    var hh = CC[NS_NETWORK_PROTOCOL_CONTRACTID_PREFIX + "http"].
             getService(CI.nsIHttpProtocolHandler);
    var httpProps = ["userAgent", "appName", "appVersion", "vendor",
                     "vendorSub", "product", "productSub", "platform",
                     "oscpu", "language", "misc"];
    sandbox.http = new sandbox.Object();
    httpProps.forEach((x) => sandbox.http[x] = hh[x]);

    // Set OSX to be the Mac OS X version, as an integer, or undefined
    // for other platforms. The integer is formed by 100 times the
    // major version plus the minor version, so 1006 for 10.6, 1010 for
    // 10.10, etc.
    var osxmatch = /Mac OS X (\d+).(\d+)$/.exec(hh.oscpu);
    sandbox.OSX = osxmatch ? parseInt(osxmatch[1]) * 100 + parseInt(osxmatch[2]) : undefined;

    // see if we have the test plugin available,
    // and set a sandox prop accordingly
    var navigator = gContainingWindow.navigator;
    var testPlugin = navigator.plugins["Test Plug-in"];
    sandbox.haveTestPlugin = !!testPlugin;

    // Set a flag on sandbox if the windows default theme is active
    sandbox.windowsDefaultTheme = gContainingWindow.matchMedia("(-moz-windows-default-theme)").matches;

    var prefs = CC["@mozilla.org/preferences-service;1"].
                getService(CI.nsIPrefBranch);
    try {
        sandbox.nativeThemePref = !prefs.getBoolPref("mozilla.widget.disable-native-theme");
    } catch (e) {
        sandbox.nativeThemePref = true;
    }

    // Read-only pref accessors usable from manifest conditions.
    sandbox.prefs = CU.cloneInto({
        getBoolPref: function(p) { return prefs.getBoolPref(p); },
        getIntPref: function(p) { return prefs.getIntPref(p); }
    }, sandbox, { cloneFunctions: true });

    // Tests shouldn't care about this except for when they need to
    // crash the content process
    sandbox.browserIsRemote = gBrowserIsRemote;
    sandbox.Mulet = gB2GisMulet;

    try {
        sandbox.asyncPan = gContainingWindow.document.docShell.asyncPanZoomEnabled;
    } catch (e) {
        sandbox.asyncPan = false;
    }

    // Only dump the sandbox contents once per run (gDumpedConditionSandbox).
    if (!gDumpedConditionSandbox) {
        logger.info("Dumping JSON representation of sandbox");
        logger.info(JSON.stringify(CU.waiveXrays(sandbox)));
        gDumpedConditionSandbox = true;
    }

    // Graphics features
    sandbox.usesRepeatResampling = sandbox.d2d;
    return sandbox;
}
+
+function AddPrefSettings(aWhere, aPrefName, aPrefValExpression, aSandbox, aTestPrefSettings, aRefPrefSettings)
+{
+ var prefVal = Components.utils.evalInSandbox("(" + aPrefValExpression + ")", aSandbox);
+ var prefType;
+ var valType = typeof(prefVal);
+ if (valType == "boolean") {
+ prefType = PREF_BOOLEAN;
+ } else if (valType == "string") {
+ prefType = PREF_STRING;
+ } else if (valType == "number" && (parseInt(prefVal) == prefVal)) {
+ prefType = PREF_INTEGER;
+ } else {
+ return false;
+ }
+ var setting = { name: aPrefName,
+ type: prefType,
+ value: prefVal };
+ if (aWhere != "ref-") {
+ aTestPrefSettings.push(setting);
+ }
+ if (aWhere != "test-") {
+ aRefPrefSettings.push(setting);
+ }
+ return true;
+}
+
+function ReadTopManifest(aFileURL, aFilter)
+{
+ var url = gIOService.newURI(aFileURL, null, null);
+ if (!url)
+ throw "Expected a file or http URL for the manifest.";
+ ReadManifest(url, EXPECTED_PASS, aFilter);
+}
+
+function AddTestItem(aTest, aFilter)
+{
+ if (!aFilter)
+ aFilter = [null, [], false];
+
+ globalFilter = aFilter[0];
+ manifestFilter = aFilter[1];
+ invertManifest = aFilter[2];
+ if ((globalFilter && !globalFilter.test(aTest.url1.spec)) ||
+ (manifestFilter &&
+ !(invertManifest ^ manifestFilter.test(aTest.url1.spec))))
+ return;
+ if (gFocusFilterMode == FOCUS_FILTER_NEEDS_FOCUS_TESTS &&
+ !aTest.needsFocus)
+ return;
+ if (gFocusFilterMode == FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS &&
+ aTest.needsFocus)
+ return;
+
+ if (aTest.url2 !== null)
+ aTest.identifier = [aTest.prettyPath, aTest.type, aTest.url2.spec];
+ else
+ aTest.identifier = aTest.prettyPath;
+
+ gURLs.push(aTest);
+}
+
+// Note: If you materially change the reftest manifest parsing,
+// please keep the parser in print-manifest-dirs.py in sync.
+//
+// Parse one reftest manifest and append its tests to gURLs via AddTestItem.
+//   aURL             - nsIURI of the manifest (file: or http:)
+//   inherited_status - minimum expected status (EXPECTED_*) imposed by the
+//                      including manifest; merged with per-line annotations
+//   aFilter          - [globalFilter, manifestFilter, invertManifest],
+//                      forwarded to AddTestItem and to recursive includes
+function ReadManifest(aURL, inherited_status, aFilter)
+{
+    // Ensure each manifest is only read once. This assumes that manifests that are
+    // included with an unusual inherited_status or filters will be read via their
+    // include before they are read directly in the case of a duplicate
+    if (gManifestsLoaded.hasOwnProperty(aURL.spec)) {
+        if (gManifestsLoaded[aURL.spec] === null)
+            return;
+        else
+            aFilter = [aFilter[0], aFilter[1], true];
+    }
+    gManifestsLoaded[aURL.spec] = aFilter[1];
+
+    var secMan = CC[NS_SCRIPTSECURITYMANAGER_CONTRACTID]
+                 .getService(CI.nsIScriptSecurityManager);
+
+    var listURL = aURL;
+    // Read the whole manifest synchronously with the system principal.
+    var channel = NetUtil.newChannel({uri: aURL, loadUsingSystemPrincipal: true});
+    var inputStream = channel.open2();
+    if (channel instanceof Components.interfaces.nsIHttpChannel
+        && channel.responseStatus != 200) {
+        logger.error("HTTP ERROR : " + channel.responseStatus);
+    }
+    var streamBuf = getStreamContent(inputStream);
+    inputStream.close();
+    var lines = streamBuf.split(/\n|\r|\r\n/);
+
+    // Build the sandbox for fails-if(), etc., condition evaluation.
+    var sandbox = BuildConditionSandbox(aURL);
+    var lineNo = 0;
+    var urlprefix = "";
+    var defaultTestPrefSettings = [], defaultRefPrefSettings = [];
+    for (var str of lines) {
+        ++lineNo;
+        if (str.charAt(0) == "#")
+            continue; // entire line was a comment
+        var i = str.search(/\s+#/);
+        if (i >= 0)
+            str = str.substring(0, i);
+        // strip leading and trailing whitespace
+        str = str.replace(/^\s*/, '').replace(/\s*$/, '');
+        if (!str || str == "")
+            continue;
+        var items = str.split(/\s+/); // split on whitespace
+
+        // "url-prefix <prefix>" applies to all later relative URLs.
+        if (items[0] == "url-prefix") {
+            if (items.length != 2)
+                throw "url-prefix requires one url in manifest file " + aURL.spec + " line " + lineNo;
+            urlprefix = items[1];
+            continue;
+        }
+
+        // "default-preferences pref(...)..." replaces (not extends) the
+        // defaults applied to all later lines in this manifest.
+        if (items[0] == "default-preferences") {
+            var m;
+            var item;
+            defaultTestPrefSettings = [];
+            defaultRefPrefSettings = [];
+            items.shift();
+            while ((item = items.shift())) {
+                if (!(m = item.match(gPrefItemRE))) {
+                    throw "Unexpected item in default-preferences list in manifest file " + aURL.spec + " line " + lineNo;
+                }
+                if (!AddPrefSettings(m[1], m[2], m[3], sandbox, defaultTestPrefSettings, defaultRefPrefSettings)) {
+                    throw "Error in pref value in manifest file " + aURL.spec + " line " + lineNo;
+                }
+            }
+            continue;
+        }
+
+        // Per-test state, reset for every manifest line.
+        var expected_status = EXPECTED_PASS;
+        var allow_silent_fail = false;
+        var minAsserts = 0;
+        var maxAsserts = 0;
+        var needs_focus = false;
+        var slow = false;
+        var testPrefSettings = defaultTestPrefSettings.concat();
+        var refPrefSettings = defaultRefPrefSettings.concat();
+        var fuzzy_max_delta = 2;
+        var fuzzy_max_pixels = 1;
+        var chaosMode = false;
+
+        // Consume leading status/annotation tokens until the test type
+        // (or include/HTTP directive) is reached.
+        while (items[0].match(/^(fails|needs-focus|random|skip|asserts|slow|require-or|silentfail|pref|test-pref|ref-pref|fuzzy|chaos-mode)/)) {
+            var item = items.shift();
+            var stat;
+            var cond;
+            var m = item.match(/^(fails|random|skip|silentfail)-if(\(.*\))$/);
+            if (m) {
+                stat = m[1];
+                // Note: m[2] contains the parentheses, and we want them.
+                cond = Components.utils.evalInSandbox(m[2], sandbox);
+            } else if (item.match(/^(fails|random|skip)$/)) {
+                stat = item;
+                cond = true;
+            } else if (item == "needs-focus") {
+                needs_focus = true;
+                cond = false;
+            } else if ((m = item.match(/^asserts\((\d+)(-\d+)?\)$/))) {
+                cond = false;
+                minAsserts = Number(m[1]);
+                maxAsserts = (m[2] == undefined) ? minAsserts
+                                                 : Number(m[2].substring(1));
+            } else if ((m = item.match(/^asserts-if\((.*?),(\d+)(-\d+)?\)$/))) {
+                cond = false;
+                if (Components.utils.evalInSandbox("(" + m[1] + ")", sandbox)) {
+                    minAsserts = Number(m[2]);
+                    maxAsserts =
+                        (m[3] == undefined) ? minAsserts
+                                            : Number(m[3].substring(1));
+                }
+            } else if (item == "slow") {
+                cond = false;
+                slow = true;
+            } else if ((m = item.match(/^require-or\((.*?)\)$/))) {
+                var args = m[1].split(/,/);
+                if (args.length != 2) {
+                    throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": wrong number of args to require-or";
+                }
+                var [precondition_str, fallback_action] = args;
+                var preconditions = precondition_str.split(/&&/);
+                cond = false;
+                for (var precondition of preconditions) {
+                    if (precondition === "debugMode") {
+                        // Currently unimplemented. Requires asynchronous
+                        // JSD call + getting an event while no JS is running
+                        stat = fallback_action;
+                        cond = true;
+                        break;
+                    } else if (precondition === "true") {
+                        // For testing
+                    } else {
+                        // Unknown precondition. Assume it is unimplemented.
+                        stat = fallback_action;
+                        cond = true;
+                        break;
+                    }
+                }
+            } else if ((m = item.match(/^slow-if\((.*?)\)$/))) {
+                cond = false;
+                if (Components.utils.evalInSandbox("(" + m[1] + ")", sandbox))
+                    slow = true;
+            } else if (item == "silentfail") {
+                cond = false;
+                allow_silent_fail = true;
+            } else if ((m = item.match(gPrefItemRE))) {
+                cond = false;
+                if (!AddPrefSettings(m[1], m[2], m[3], sandbox, testPrefSettings, refPrefSettings)) {
+                    throw "Error in pref value in manifest file " + aURL.spec + " line " + lineNo;
+                }
+            } else if ((m = item.match(/^fuzzy\((\d+),(\d+)\)$/))) {
+                cond = false;
+                expected_status = EXPECTED_FUZZY;
+                fuzzy_max_delta = Number(m[1]);
+                fuzzy_max_pixels = Number(m[2]);
+            } else if ((m = item.match(/^fuzzy-if\((.*?),(\d+),(\d+)\)$/))) {
+                cond = false;
+                if (Components.utils.evalInSandbox("(" + m[1] + ")", sandbox)) {
+                    expected_status = EXPECTED_FUZZY;
+                    fuzzy_max_delta = Number(m[2]);
+                    fuzzy_max_pixels = Number(m[3]);
+                }
+            } else if (item == "chaos-mode") {
+                cond = false;
+                chaosMode = true;
+            } else {
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": unexpected item " + item;
+            }
+
+            // Conditional and unconditional status tokens funnel through
+            // here; stat/cond were set by the branch above.
+            if (cond) {
+                if (stat == "fails") {
+                    expected_status = EXPECTED_FAIL;
+                } else if (stat == "random") {
+                    expected_status = EXPECTED_RANDOM;
+                } else if (stat == "skip") {
+                    expected_status = EXPECTED_DEATH;
+                } else if (stat == "silentfail") {
+                    allow_silent_fail = true;
+                }
+            }
+        }
+
+        // The including manifest's status acts as a floor (EXPECTED_* values
+        // are ordered by severity).
+        expected_status = Math.max(expected_status, inherited_status);
+
+        if (minAsserts > maxAsserts) {
+            throw "Bad range in manifest file " + aURL.spec + " line " + lineNo;
+        }
+
+        var runHttp = false;
+        var httpDepth;
+        if (items[0] == "HTTP") {
+            runHttp = (aURL.scheme == "file"); // We can't yet run the local HTTP server
+                                               // for non-local reftests.
+            httpDepth = 0;
+            items.shift();
+        } else if (items[0].match(/HTTP\(\.\.(\/\.\.)*\)/)) {
+            // Accept HTTP(..), HTTP(../..), HTTP(../../..), etc.
+            runHttp = (aURL.scheme == "file"); // We can't yet run the local HTTP server
+                                               // for non-local reftests.
+            httpDepth = (items[0].length - 5) / 3;
+            items.shift();
+        }
+
+        // do not prefix the url for include commands or urls specifying
+        // a protocol
+        if (urlprefix && items[0] != "include") {
+            if (items.length > 1 && !items[1].match(gProtocolRE)) {
+                items[1] = urlprefix + items[1];
+            }
+            if (items.length > 2 && !items[2].match(gProtocolRE)) {
+                items[2] = urlprefix + items[2];
+            }
+        }
+
+        var principal = secMan.createCodebasePrincipal(aURL, {});
+
+        // Dispatch on the test type token.
+        if (items[0] == "include") {
+            if (items.length != 2)
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to include";
+            if (runHttp)
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": use of include with http";
+            var incURI = gIOService.newURI(items[1], null, listURL);
+            secMan.checkLoadURIWithPrincipal(principal, incURI,
+                                             CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
+            // Recurse into the included manifest with our computed status.
+            ReadManifest(incURI, expected_status, aFilter);
+        } else if (items[0] == TYPE_LOAD) {
+            if (items.length != 2)
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to load";
+            if (expected_status != EXPECTED_PASS &&
+                expected_status != EXPECTED_DEATH)
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect known failure type for load test";
+            var [testURI] = runHttp
+                            ? ServeFiles(principal, httpDepth,
+                                         listURL, [items[1]])
+                            : [gIOService.newURI(items[1], null, listURL)];
+            var prettyPath = runHttp
+                             ? gIOService.newURI(items[1], null, listURL).spec
+                             : testURI.spec;
+            secMan.checkLoadURIWithPrincipal(principal, testURI,
+                                             CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
+            AddTestItem({ type: TYPE_LOAD,
+                          expected: expected_status,
+                          allowSilentFail: allow_silent_fail,
+                          prettyPath: prettyPath,
+                          minAsserts: minAsserts,
+                          maxAsserts: maxAsserts,
+                          needsFocus: needs_focus,
+                          slow: slow,
+                          prefSettings1: testPrefSettings,
+                          prefSettings2: refPrefSettings,
+                          fuzzyMaxDelta: fuzzy_max_delta,
+                          fuzzyMaxPixels: fuzzy_max_pixels,
+                          url1: testURI,
+                          url2: null,
+                          chaosMode: chaosMode }, aFilter);
+        } else if (items[0] == TYPE_SCRIPT) {
+            if (items.length != 2)
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to script";
+            var [testURI] = runHttp
+                            ? ServeFiles(principal, httpDepth,
+                                         listURL, [items[1]])
+                            : [gIOService.newURI(items[1], null, listURL)];
+            var prettyPath = runHttp
+                             ? gIOService.newURI(items[1], null, listURL).spec
+                             : testURI.spec;
+            secMan.checkLoadURIWithPrincipal(principal, testURI,
+                                             CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
+            AddTestItem({ type: TYPE_SCRIPT,
+                          expected: expected_status,
+                          allowSilentFail: allow_silent_fail,
+                          prettyPath: prettyPath,
+                          minAsserts: minAsserts,
+                          maxAsserts: maxAsserts,
+                          needsFocus: needs_focus,
+                          slow: slow,
+                          prefSettings1: testPrefSettings,
+                          prefSettings2: refPrefSettings,
+                          fuzzyMaxDelta: fuzzy_max_delta,
+                          fuzzyMaxPixels: fuzzy_max_pixels,
+                          url1: testURI,
+                          url2: null,
+                          chaosMode: chaosMode }, aFilter);
+        } else if (items[0] == TYPE_REFTEST_EQUAL || items[0] == TYPE_REFTEST_NOTEQUAL) {
+            if (items.length != 3)
+                throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": incorrect number of arguments to " + items[0];
+            var [testURI, refURI] = runHttp
+                                    ? ServeFiles(principal, httpDepth,
+                                                 listURL, [items[1], items[2]])
+                                    : [gIOService.newURI(items[1], null, listURL),
+                                       gIOService.newURI(items[2], null, listURL)];
+            var prettyPath = runHttp
+                             ? gIOService.newURI(items[1], null, listURL).spec
+                             : testURI.spec;
+            secMan.checkLoadURIWithPrincipal(principal, testURI,
+                                             CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
+            secMan.checkLoadURIWithPrincipal(principal, refURI,
+                                             CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
+            AddTestItem({ type: items[0],
+                          expected: expected_status,
+                          allowSilentFail: allow_silent_fail,
+                          prettyPath: prettyPath,
+                          minAsserts: minAsserts,
+                          maxAsserts: maxAsserts,
+                          needsFocus: needs_focus,
+                          slow: slow,
+                          prefSettings1: testPrefSettings,
+                          prefSettings2: refPrefSettings,
+                          fuzzyMaxDelta: fuzzy_max_delta,
+                          fuzzyMaxPixels: fuzzy_max_pixels,
+                          url1: testURI,
+                          url2: refURI,
+                          chaosMode: chaosMode }, aFilter);
+        } else {
+            throw "Error in manifest file " + aURL.spec + " line " + lineNo + ": unknown test type " + items[0];
+        }
+    }
+}
+
+function AddURIUseCount(uri)
+{
+ if (uri == null)
+ return;
+
+ var spec = uri.spec;
+ if (spec in gURIUseCounts) {
+ gURIUseCounts[spec]++;
+ } else {
+ gURIUseCounts[spec] = 1;
+ }
+}
+
+function BuildUseCounts()
+{
+ if (gNoCanvasCache) {
+ return;
+ }
+
+ gURIUseCounts = {};
+ for (var i = 0; i < gURLs.length; ++i) {
+ var url = gURLs[i];
+ if (url.expected != EXPECTED_DEATH &&
+ (url.type == TYPE_REFTEST_EQUAL ||
+ url.type == TYPE_REFTEST_NOTEQUAL)) {
+ if (url.prefSettings1.length == 0) {
+ AddURIUseCount(gURLs[i].url1);
+ }
+ if (url.prefSettings2.length == 0) {
+ AddURIUseCount(gURLs[i].url2);
+ }
+ }
+ }
+}
+
+function ServeFiles(manifestPrincipal, depth, aURL, files)
+{
+ var listURL = aURL.QueryInterface(CI.nsIFileURL);
+ var directory = listURL.file.parent;
+
+ // Allow serving a tree that's an ancestor of the directory containing
+ // the files so that they can use resources in ../ (etc.).
+ var dirPath = "/";
+ while (depth > 0) {
+ dirPath = "/" + directory.leafName + dirPath;
+ directory = directory.parent;
+ --depth;
+ }
+
+ gCount++;
+ var path = "/" + Date.now() + "/" + gCount;
+ gServer.registerDirectory(path + "/", directory);
+
+ var secMan = CC[NS_SCRIPTSECURITYMANAGER_CONTRACTID]
+ .getService(CI.nsIScriptSecurityManager);
+
+ var testbase = gIOService.newURI("http://localhost:" + gHttpServerPort +
+ path + dirPath, null, null);
+
+ // Give the testbase URI access to XUL and XBL
+ Services.perms.add(testbase, "allowXULXBL", Services.perms.ALLOW_ACTION);
+
+ function FileToURI(file)
+ {
+ // Only serve relative URIs via the HTTP server, not absolute
+ // ones like about:blank.
+ var testURI = gIOService.newURI(file, null, testbase);
+
+ // XXX necessary? manifestURL guaranteed to be file, others always HTTP
+ secMan.checkLoadURIWithPrincipal(manifestPrincipal, testURI,
+ CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
+
+ return testURI;
+ }
+
+ return files.map(FileToURI);
+}
+
+// Return true iff this window is focused when this function returns.
+function Focus()
+{
+    var fm = CC["@mozilla.org/focus-manager;1"].getService(CI.nsIFocusManager);
+    fm.focusedWindow = gContainingWindow;
+#ifdef XP_MACOSX
+    // On OS X, setting focusedWindow isn't sufficient if another app is
+    // frontmost; bring this application forward via the dock service.
+    // Failures are deliberately ignored (best effort).
+    try {
+        var dock = CC["@mozilla.org/widget/macdocksupport;1"].getService(CI.nsIMacDockSupport);
+        dock.activateApplication(true);
+    } catch(ex) {
+    }
+#endif // XP_MACOSX
+    return true;
+}
+
+// Deliberately remove focus from the test window before running a test
+// that did not declare needs-focus.
+function Blur()
+{
+    // On non-remote reftests, this will transfer focus to the dummy window
+    // we created to hold focus for non-needs-focus tests. Buggy tests
+    // (ones which require focus but don't request needs-focus) will then
+    // fail.
+    gContainingWindow.blur();
+}
+
+// Advance to and begin the next runnable test at the head of gURLs:
+// skip expected-crash, unfocusable and slow tests (logging each), then
+// either finish/repeat the whole run or kick off the first document load.
+function StartCurrentTest()
+{
+    gTestLog = [];
+
+    // make sure we don't run tests that are expected to kill the browser
+    while (gURLs.length > 0) {
+        var test = gURLs[0];
+        logger.testStart(test.identifier);
+        if (test.expected == EXPECTED_DEATH) {
+            ++gTestResults.Skip;
+            logger.testEnd(test.identifier, "SKIP");
+            gURLs.shift();
+        } else if (test.needsFocus && !Focus()) {
+            // FIXME: Marking this as a known fail is dangerous! What
+            // if it starts failing all the time?
+            ++gTestResults.Skip;
+            logger.testEnd(test.identifier, "SKIP", null, "(COULDN'T GET FOCUS)");
+            gURLs.shift();
+        } else if (test.slow && !gRunSlowTests) {
+            ++gTestResults.Slow;
+            logger.testEnd(test.identifier, "SKIP", null, "(SLOW)");
+            gURLs.shift();
+        } else {
+            break;
+        }
+    }
+
+    // End the run when the queue is exhausted (and no repeats remain) or
+    // when --run-until-failure has tripped.
+    if ((gURLs.length == 0 && gRepeat == 0) ||
+        (gRunUntilFailure && HasUnexpectedResult())) {
+        RestoreChangedPreferences();
+        DoneTests();
+    } else if (gURLs.length == 0 && gRepeat > 0) {
+        // Repeat
+        gRepeat--;
+        StartTests();
+    } else {
+        if (gURLs[0].chaosMode) {
+            gWindowUtils.enterChaosMode();
+        }
+        if (!gURLs[0].needsFocus) {
+            Blur();
+        }
+        // Show progress in the window title, then load document 1 of 2.
+        var currentTest = gTotalTests - gURLs.length;
+        gContainingWindow.document.title = "reftest: " + currentTest + " / " + gTotalTests +
+            " (" + Math.floor(100 * (currentTest / gTotalTests)) + "%)";
+        StartCurrentURI(1);
+    }
+}
+
+// Load document |aState| (1 = test, 2 = reference) of the current test.
+// Applies that side's pref settings (saving old values for restoration),
+// and short-circuits the load when a cached canvas already exists.
+function StartCurrentURI(aState)
+{
+    gState = aState;
+    gCurrentURL = gURLs[0]["url" + aState].spec;
+
+    // Undo any prefs the previous document set before applying new ones.
+    RestoreChangedPreferences();
+
+    var prefSettings = gURLs[0]["prefSettings" + aState];
+    if (prefSettings.length > 0) {
+        var prefs = Components.classes["@mozilla.org/preferences-service;1"].
+                    getService(Components.interfaces.nsIPrefBranch);
+        var badPref = undefined;
+        try {
+            prefSettings.forEach(function(ps) {
+                // Read the old value first; a missing or wrongly-typed pref
+                // aborts the whole test via the "bad pref" throw below.
+                var oldVal;
+                if (ps.type == PREF_BOOLEAN) {
+                    try {
+                        oldVal = prefs.getBoolPref(ps.name);
+                    } catch (e) {
+                        badPref = "boolean preference '" + ps.name + "'";
+                        throw "bad pref";
+                    }
+                } else if (ps.type == PREF_STRING) {
+                    try {
+                        oldVal = prefs.getCharPref(ps.name);
+                    } catch (e) {
+                        badPref = "string preference '" + ps.name + "'";
+                        throw "bad pref";
+                    }
+                } else if (ps.type == PREF_INTEGER) {
+                    try {
+                        oldVal = prefs.getIntPref(ps.name);
+                    } catch (e) {
+                        badPref = "integer preference '" + ps.name + "'";
+                        throw "bad pref";
+                    }
+                } else {
+                    throw "internal error - unknown preference type";
+                }
+                // Only touch (and remember) prefs whose value actually changes.
+                if (oldVal != ps.value) {
+                    gPrefsToRestore.push( { name: ps.name,
+                                            type: ps.type,
+                                            value: oldVal } );
+                    var value = ps.value;
+                    if (ps.type == PREF_BOOLEAN) {
+                        prefs.setBoolPref(ps.name, value);
+                    } else if (ps.type == PREF_STRING) {
+                        prefs.setCharPref(ps.name, value);
+                        value = '"' + value + '"';
+                    } else if (ps.type == PREF_INTEGER) {
+                        prefs.setIntPref(ps.name, value);
+                    }
+                    logger.info("SET PREFERENCE pref(" + ps.name + "," + value + ")");
+                }
+            });
+        } catch (e) {
+            if (e == "bad pref") {
+                var test = gURLs[0];
+                if (test.expected == EXPECTED_FAIL) {
+                    logger.testEnd(test.identifier, "FAIL", "FAIL",
+                                   "(SKIPPED; " + badPref + " not known or wrong type)");
+                    ++gTestResults.Skip;
+                } else {
+                    logger.testEnd(test.identifier, "FAIL", "PASS",
+                                   badPref + " not known or wrong type");
+                    ++gTestResults.UnexpectedFail;
+                }
+
+                // skip the test that had a bad preference
+                gURLs.shift();
+                StartCurrentTest();
+                return;
+            } else {
+                throw e;
+            }
+        }
+    }
+
+    // Cached-canvas fast path: only valid without custom prefs and when
+    // no assertions are expected for this test.
+    if (prefSettings.length == 0 &&
+        gURICanvases[gCurrentURL] &&
+        (gURLs[0].type == TYPE_REFTEST_EQUAL ||
+         gURLs[0].type == TYPE_REFTEST_NOTEQUAL) &&
+        gURLs[0].maxAsserts == 0) {
+        // Pretend the document loaded --- RecordResult will notice
+        // there's already a canvas for this URL
+        gContainingWindow.setTimeout(RecordResult, 0);
+    } else {
+        var currentTest = gTotalTests - gURLs.length;
+        // Log this to preserve the same overall log format,
+        // should be removed if the format is updated
+        gDumpFn("REFTEST TEST-LOAD | " + gCurrentURL + " | " + currentTest + " / " + gTotalTests +
+                " (" + Math.floor(100 * (currentTest / gTotalTests)) + "%)\n");
+        TestBuffer("START " + gCurrentURL);
+        var type = gURLs[0].type
+        if (TYPE_SCRIPT == type) {
+            SendLoadScriptTest(gCurrentURL, gLoadTimeout);
+        } else {
+            SendLoadTest(type, gCurrentURL, gLoadTimeout);
+        }
+    }
+}
+
+// Finish the suite: emit summary logging, stop the local HTTP server if
+// one is running, then force-quit the application.
+function DoneTests()
+{
+    logger.suiteEnd(extra={'results': gTestResults});
+    logger.info("Slowest test took " + gSlowestTestTime + "ms (" + gSlowestTestURL + ")");
+    logger.info("Total canvas count = " + gRecycledCanvases.length);
+    if (gFailedUseWidgetLayers) {
+        LogWidgetLayersFailure();
+    }
+
+    // Quit only after the server has fully stopped (stop() is async).
+    function onStopped() {
+        let appStartup = CC["@mozilla.org/toolkit/app-startup;1"].getService(CI.nsIAppStartup);
+        appStartup.quit(CI.nsIAppStartup.eForceQuit);
+    }
+    if (gServer) {
+        gServer.stop(onStopped);
+    }
+    else {
+        onStopped();
+    }
+}
+
+function UpdateCanvasCache(url, canvas)
+{
+ var spec = url.spec;
+
+ --gURIUseCounts[spec];
+
+ if (gURIUseCounts[spec] == 0) {
+ ReleaseCanvas(canvas);
+ delete gURICanvases[spec];
+ } else if (gURIUseCounts[spec] > 0) {
+ gURICanvases[spec] = canvas;
+ } else {
+ throw "Use counts were computed incorrectly";
+ }
+}
+
+// Recompute drawWindow flags for every drawWindow operation.
+// We have to do this every time since our window can be
+// asynchronously resized (e.g. by the window manager, to make
+// it fit on screen) at unpredictable times.
+// Fortunately this is pretty cheap.
+// Draws the (x, y, w, h) region of the containing window into |ctx| on a
+// white background, using widget layers whenever the browser element fits
+// entirely inside the window.
+function DoDrawWindow(ctx, x, y, w, h)
+{
+    var flags = ctx.DRAWWINDOW_DRAW_CARET | ctx.DRAWWINDOW_DRAW_VIEW;
+    var testRect = gBrowser.getBoundingClientRect();
+    if (gIgnoreWindowSize ||
+        (0 <= testRect.left &&
+         0 <= testRect.top &&
+         gContainingWindow.innerWidth >= testRect.right &&
+         gContainingWindow.innerHeight >= testRect.bottom)) {
+        // We can use the window's retained layer manager
+        // because the window is big enough to display the entire
+        // browser element
+        flags |= ctx.DRAWWINDOW_USE_WIDGET_LAYERS;
+    } else if (gBrowserIsRemote) {
+        // Remote content can only be drawn via widget layers; without them
+        // this snapshot cannot succeed.
+        logger.error(gCurrentURL + " | can't drawWindow remote content");
+        ++gTestResults.Exception;
+    }
+
+    if (gDrawWindowFlags != flags) {
+        // Every time the flags change, dump the new state.
+        gDrawWindowFlags = flags;
+        var flagsStr = "DRAWWINDOW_DRAW_CARET | DRAWWINDOW_DRAW_VIEW";
+        if (flags & ctx.DRAWWINDOW_USE_WIDGET_LAYERS) {
+            flagsStr += " | DRAWWINDOW_USE_WIDGET_LAYERS";
+        } else {
+            // Output a special warning because we need to be able to detect
+            // this whenever it happens.
+            LogWidgetLayersFailure();
+            gFailedUseWidgetLayers = true;
+        }
+        logger.info("drawWindow flags = " + flagsStr +
+                    "; window size = " + gContainingWindow.innerWidth + "," + gContainingWindow.innerHeight +
+                    "; test browser size = " + testRect.width + "," + testRect.height);
+    }
+
+    TestBuffer("DoDrawWindow " + x + "," + y + "," + w + "," + h);
+    ctx.drawWindow(gContainingWindow, x, y, w, h, "rgb(255,255,255)",
+                   gDrawWindowFlags);
+}
+
+// Snapshot the whole window into gCurrentCanvas, allocating the canvas
+// if necessary.  Returns false for load/script tests, which are never
+// snapshotted; true once the draw has happened.
+function InitCurrentCanvasWithSnapshot()
+{
+    TestBuffer("Initializing canvas snapshot");
+
+    if (gURLs[0].type == TYPE_LOAD || gURLs[0].type == TYPE_SCRIPT) {
+        // We don't want to snapshot this kind of test
+        return false;
+    }
+
+    if (!gCurrentCanvas) {
+        gCurrentCanvas = AllocateCanvas();
+    }
+
+    var ctx = gCurrentCanvas.getContext("2d");
+    DoDrawWindow(ctx, 0, 0, gCurrentCanvas.width, gCurrentCanvas.height);
+    return true;
+}
+
+function UpdateCurrentCanvasForInvalidation(rects)
+{
+ TestBuffer("Updating canvas for invalidation");
+
+ if (!gCurrentCanvas) {
+ return;
+ }
+
+ var ctx = gCurrentCanvas.getContext("2d");
+ for (var i = 0; i < rects.length; ++i) {
+ var r = rects[i];
+ // Set left/top/right/bottom to pixel boundaries
+ var left = Math.floor(r.left);
+ var top = Math.floor(r.top);
+ var right = Math.ceil(r.right);
+ var bottom = Math.ceil(r.bottom);
+
+ // Clamp the values to the canvas size
+ left = Math.max(0, Math.min(left, gCurrentCanvas.width));
+ top = Math.max(0, Math.min(top, gCurrentCanvas.height));
+ right = Math.max(0, Math.min(right, gCurrentCanvas.width));
+ bottom = Math.max(0, Math.min(bottom, gCurrentCanvas.height));
+
+ ctx.save();
+ ctx.translate(left, top);
+ DoDrawWindow(ctx, left, top, right - left, bottom - top);
+ ctx.restore();
+ }
+}
+
+// Redraw the entire window into gCurrentCanvas (used when invalidation
+// covers, or must be treated as covering, the whole viewport).
+function UpdateWholeCurrentCanvasForInvalidation()
+{
+    TestBuffer("Updating entire canvas for invalidation");
+
+    if (!gCurrentCanvas) {
+        return;
+    }
+
+    var ctx = gCurrentCanvas.getContext("2d");
+    DoDrawWindow(ctx, 0, 0, gCurrentCanvas.width, gCurrentCanvas.height);
+}
+
+// Record the outcome of the document that just finished (or of the whole
+// test, once both sides have loaded).  Handles all test types:
+//   - load tests pass trivially;
+//   - script tests report per-assertion results from |scriptResults|
+//     (with |errorMsg| forcing an unexpected failure);
+//   - comparison tests capture a canvas per state and, after state 2,
+//     compare the two canvases against the manifest expectation.
+function RecordResult(testRunTime, errorMsg, scriptResults)
+{
+    TestBuffer("RecordResult fired");
+
+    // Keep track of which test was slowest, and how long it took.
+    if (testRunTime > gSlowestTestTime) {
+        gSlowestTestTime = testRunTime;
+        gSlowestTestURL = gCurrentURL;
+    }
+
+    // Maps expectation x actual-pass/fail to [status, expected-status]
+    // strings and a result-counter name.
+    // Not 'const ...' because of 'EXPECTED_*' value dependency.
+    var outputs = {};
+    outputs[EXPECTED_PASS] = {
+        true:  {s: ["PASS", "PASS"], n: "Pass"},
+        false: {s: ["FAIL", "PASS"], n: "UnexpectedFail"}
+    };
+    outputs[EXPECTED_FAIL] = {
+        true:  {s: ["PASS", "FAIL"], n: "UnexpectedPass"},
+        false: {s: ["FAIL", "FAIL"], n: "KnownFail"}
+    };
+    outputs[EXPECTED_RANDOM] = {
+        true:  {s: ["PASS", "PASS"], n: "Random"},
+        false: {s: ["FAIL", "FAIL"], n: "Random"}
+    };
+    outputs[EXPECTED_FUZZY] = outputs[EXPECTED_PASS];
+
+    var output;
+    var extra;
+
+    if (gURLs[0].type == TYPE_LOAD) {
+        ++gTestResults.LoadOnly;
+        logger.testEnd(gURLs[0].identifier, "PASS", "PASS", "(LOAD ONLY)");
+        gCurrentCanvas = null;
+        FinishTestItem();
+        return;
+    }
+    if (gURLs[0].type == TYPE_SCRIPT) {
+        var expected = gURLs[0].expected;
+
+        if (errorMsg) {
+            // Force an unexpected failure to alert the test author to fix the test.
+            expected = EXPECTED_PASS;
+        } else if (scriptResults.length == 0) {
+            // This failure may be due to a JavaScript Engine bug causing
+            // early termination of the test. If we do not allow silent
+            // failure, report an error.
+            if (!gURLs[0].allowSilentFail)
+                errorMsg = "No test results reported. (SCRIPT)\n";
+            else
+                logger.info("An expected silent failure occurred");
+        }
+
+        if (errorMsg) {
+            output = outputs[expected][false];
+            extra = { status_msg: output.n };
+            ++gTestResults[output.n];
+            logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], errorMsg, null, extra);
+            FinishTestItem();
+            return;
+        }
+
+        var anyFailed = scriptResults.some(function(result) { return !result.passed; });
+        var outputPair;
+        if (anyFailed && expected == EXPECTED_FAIL) {
+            // If we're marked as expected to fail, and some (but not all) tests
+            // passed, treat those tests as though they were marked random
+            // (since we can't tell whether they were really intended to be
+            // marked failing or not).
+            outputPair = { true: outputs[EXPECTED_RANDOM][true],
+                           false: outputs[expected][false] };
+        } else {
+            outputPair = outputs[expected];
+        }
+        // One testEnd per script assertion, numbered in order.
+        var index = 0;
+        scriptResults.forEach(function(result) {
+            var output = outputPair[result.passed];
+            var extra = { status_msg: output.n };
+
+            ++gTestResults[output.n];
+            logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1],
+                           result.description + " item " + (++index), null, extra);
+        });
+
+        if (anyFailed && expected == EXPECTED_PASS) {
+            FlushTestBuffer();
+        }
+
+        FinishTestItem();
+        return;
+    }
+
+    // Comparison test: pick up the cached canvas when one exists.
+    if (gURLs[0]["prefSettings" + gState].length == 0 &&
+        gURICanvases[gCurrentURL]) {
+        gCurrentCanvas = gURICanvases[gCurrentURL];
+    }
+    if (gCurrentCanvas == null) {
+        logger.error(gCurrentURL, "program error managing snapshots");
+        ++gTestResults.Exception;
+    }
+    // Stash this state's snapshot: state 1 is the test, state 2 the reference.
+    if (gState == 1) {
+        gCanvas1 = gCurrentCanvas;
+    } else {
+        gCanvas2 = gCurrentCanvas;
+    }
+    gCurrentCanvas = null;
+
+    ResetRenderingState();
+
+    switch (gState) {
+        case 1:
+            // First document has been loaded.
+            // Proceed to load the second document.
+
+            CleanUpCrashDumpFiles();
+            StartCurrentURI(2);
+            break;
+        case 2:
+            // Both documents have been loaded. Compare the renderings and see
+            // if the comparison result matches the expected result specified
+            // in the manifest.
+
+            // number of different pixels
+            var differences;
+            // whether the two renderings match:
+            var equal;
+            var maxDifference = {};
+
+            differences = gWindowUtils.compareCanvases(gCanvas1, gCanvas2, maxDifference);
+            equal = (differences == 0);
+
+            // what is expected on this platform (PASS, FAIL, or RANDOM)
+            var expected = gURLs[0].expected;
+
+            // Differences within the manifest's fuzzy(delta,pixels) bounds
+            // count as equal only when the test is annotated fuzzy.
+            if (maxDifference.value > 0 && maxDifference.value <= gURLs[0].fuzzyMaxDelta &&
+                differences <= gURLs[0].fuzzyMaxPixels) {
+                if (equal) {
+                    throw "Inconsistent result from compareCanvases.";
+                }
+                equal = expected == EXPECTED_FUZZY;
+                logger.info("REFTEST fuzzy match");
+            }
+
+            var failedExtraCheck = gFailedNoPaint || gFailedOpaqueLayer || gFailedAssignedLayer;
+
+            // whether the comparison result matches what is in the manifest
+            var test_passed = (equal == (gURLs[0].type == TYPE_REFTEST_EQUAL)) && !failedExtraCheck;
+
+            output = outputs[expected][test_passed];
+            extra = { status_msg: output.n };
+
+            ++gTestResults[output.n];
+
+            // It's possible that we failed both an "extra check" and the normal comparison, but we don't
+            // have a way to annotate these separately, so just print an error for the extra check failures.
+            if (failedExtraCheck) {
+                var failures = [];
+                if (gFailedNoPaint) {
+                    failures.push("failed reftest-no-paint");
+                }
+                // The gFailed*Messages arrays will contain messages from both the test and the reference.
+                if (gFailedOpaqueLayer) {
+                    failures.push("failed reftest-opaque-layer: " + gFailedOpaqueLayerMessages.join(", "));
+                }
+                if (gFailedAssignedLayer) {
+                    failures.push("failed reftest-assigned-layer: " + gFailedAssignedLayerMessages.join(", "));
+                }
+                var failureString = failures.join(", ");
+                logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], failureString, null, extra);
+            } else {
+                var message = "image comparison";
+                // Attach screenshots only for unexpected outcomes, so the
+                // analyzer can display the differing renderings.
+                if (!test_passed && expected == EXPECTED_PASS ||
+                    !test_passed && expected == EXPECTED_FUZZY ||
+                    test_passed && expected == EXPECTED_FAIL) {
+                    if (!equal) {
+                        extra.max_difference = maxDifference.value;
+                        extra.differences = differences;
+                        var image1 = gCanvas1.toDataURL();
+                        var image2 = gCanvas2.toDataURL();
+                        extra.reftest_screenshots = [
+                            {url:gURLs[0].identifier[0],
+                             screenshot: image1.slice(image1.indexOf(",") + 1)},
+                            gURLs[0].identifier[1],
+                            {url:gURLs[0].identifier[2],
+                             screenshot: image2.slice(image2.indexOf(",") + 1)}
+                        ];
+                        extra.image1 = image1;
+                        extra.image2 = image2;
+                        message += (", max difference: " + extra.max_difference +
+                                    ", number of differing pixels: " + differences);
+                    } else {
+                        extra.image1 = gCanvas1.toDataURL();
+                    }
+                }
+                logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], message, null, extra);
+
+                if (gNoCanvasCache) {
+                    ReleaseCanvas(gCanvas1);
+                    ReleaseCanvas(gCanvas2);
+                } else {
+                    if (gURLs[0].prefSettings1.length == 0) {
+                        UpdateCanvasCache(gURLs[0].url1, gCanvas1);
+                    }
+                    if (gURLs[0].prefSettings2.length == 0) {
+                        UpdateCanvasCache(gURLs[0].url2, gCanvas2);
+                    }
+                }
+            }
+
+            if ((!test_passed && expected == EXPECTED_PASS) || (test_passed && expected == EXPECTED_FAIL)) {
+                FlushTestBuffer();
+            }
+
+            CleanUpCrashDumpFiles();
+            FinishTestItem();
+            break;
+        default:
+            throw "Unexpected state.";
+    }
+}
+
// Record that the page for the current test state failed to load, emit a
// TBPL-parseable failure for it, and advance to the next test item.
function LoadFailed(why)
{
    ++gTestResults.FailedLoad;
    // Once bug 896840 is fixed, this can go away, but for now it will give log
    // output that is TBPL starable for bug 789751 and bug 720452.
    if (!why) {
        logger.error("load failed with unknown reason");
    }
    // gState selects url1 (test) or url2 (reference) on the current entry.
    logger.testEnd(gURLs[0]["url" + gState].spec, "FAIL", "PASS", "load failed: " + why);
    FlushTestBuffer();
    FinishTestItem();
}
+
// Delete the minidump/.extra files produced by crashes the current test
// declared it would cause, then forget the expected-file list.
function RemoveExpectedCrashDumpFiles()
{
    if (gExpectingProcessCrash) {
        gExpectedCrashDumpFiles.forEach(function(crashFilename) {
            var dumpFile = gCrashDumpDir.clone();
            dumpFile.append(crashFilename);
            if (dumpFile.exists()) {
                dumpFile.remove(false);
            }
        });
    }
    gExpectedCrashDumpFiles.length = 0;
}
+
// Scan the crash-dump directory for .dmp/.extra files we have not already
// reported and were not expecting.  The first such file marks the current
// test as an unexpected failure; every one is logged and remembered so it
// is reported only once across tests.
function FindUnexpectedCrashDumpFiles()
{
    if (!gCrashDumpDir.exists()) {
        return;
    }

    let entries = gCrashDumpDir.directoryEntries;
    if (!entries) {
        return;
    }

    let foundCrashDumpFile = false;
    while (entries.hasMoreElements()) {
        let file = entries.getNext().QueryInterface(CI.nsIFile);
        let path = String(file.path);
        // Skip non-dump files and dumps we already reported earlier.
        if (!path.match(/\.(dmp|extra)$/) || gUnexpectedCrashDumpFiles[path]) {
            continue;
        }
        if (!foundCrashDumpFile) {
            ++gTestResults.UnexpectedFail;
            foundCrashDumpFile = true;
            logger.testEnd(gCurrentURL, "FAIL", "PASS", "This test left crash dumps behind, but we weren't expecting it to!");
        }
        logger.info("Found unexpected crash dump file " + path);
        gUnexpectedCrashDumpFiles[path] = true;
    }
}
+
// Remove the crash dumps the test expected to produce, then report any
// leftover unexpected ones, and reset the expectation flag for the next test.
function CleanUpCrashDumpFiles()
{
    RemoveExpectedCrashDumpFiles();
    FindUnexpectedCrashDumpFiles();
    gExpectingProcessCrash = false;
}
+
// Wrap up the current test item: ask content to load the blank clearing
// page (so unload-time assertions are attributed to this test) and reset
// the per-test "extra check" failure state.
function FinishTestItem()
{
    // Replace document with BLANK_URL_FOR_CLEARING in case there are
    // assertions when unloading.
    logger.debug("Loading a blank page");
    // After clearing, content will notify us of the assertion count
    // and tests will continue.
    SendClear();
    gFailedNoPaint = false;
    gFailedOpaqueLayer = false;
    gFailedOpaqueLayerMessages = [];
    gFailedAssignedLayer = false;
    gFailedAssignedLayerMessages = [];
}
+
// Compare the number of assertions hit by the finished test (plus any
// chrome-process assertions when content runs out of process) against the
// test's asserts()/asserts-if() annotation, log the outcome, then shift
// gURLs and start the next test.
function DoAssertionCheck(numAsserts)
{
    if (gDebug.isDebugBuild) {
        if (gBrowserIsRemote) {
            // Count chrome-process asserts too when content is out of
            // process.
            var newAssertionCount = gDebug.assertionCount;
            var numLocalAsserts = newAssertionCount - gAssertionCount;
            gAssertionCount = newAssertionCount;

            numAsserts += numLocalAsserts;
        }

        var minAsserts = gURLs[0].minAsserts;
        var maxAsserts = gURLs[0].maxAsserts;

        var expectedAssertions = "expected " + minAsserts;
        if (minAsserts != maxAsserts) {
            expectedAssertions += " to " + maxAsserts;
        }
        expectedAssertions += " assertions";

        if (numAsserts < minAsserts) {
            ++gTestResults.AssertionUnexpectedFixed;
            gDumpFn("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
                    " | assertion count " + numAsserts + " is less than " +
                    expectedAssertions + "\n");
        } else if (numAsserts > maxAsserts) {
            ++gTestResults.AssertionUnexpected;
            gDumpFn("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
                    " | assertion count " + numAsserts + " is more than " +
                    expectedAssertions + "\n");
        } else if (numAsserts != 0) {
            ++gTestResults.AssertionKnown;
            // Bug fix: this message was missing the " | " separator between
            // the test path and the description (it read
            // "...prettyPathassertion count..."), breaking the
            // "STATUS | test | message" format the other branches use and
            // that TBPL-style log parsers split on.
            gDumpFn("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
                    " | assertion count " + numAsserts + " matches " +
                    expectedAssertions + "\n");
        }
    }

    if (gURLs[0].chaosMode) {
        gWindowUtils.leaveChaosMode();
    }

    // And start the next test.
    gURLs.shift();
    StartCurrentTest();
}
+
// Put the content process's rendering back to its default state between tests.
function ResetRenderingState()
{
    SendResetRenderingState();
    // We would want to clear any viewconfig here, if we add support for it
}
+
// Undo the pref()/test-pref()/ref-pref() overrides applied for the previous
// test, restoring each preference to its saved value.  Restoration happens
// in reverse application order so nested overrides unwind correctly.
function RestoreChangedPreferences()
{
    if (gPrefsToRestore.length == 0) {
        return;
    }
    var prefs = Components.classes["@mozilla.org/preferences-service;1"].
                getService(Components.interfaces.nsIPrefBranch);
    for (var idx = gPrefsToRestore.length - 1; idx >= 0; idx--) {
        var ps = gPrefsToRestore[idx];
        var value = ps.value;
        if (ps.type == PREF_BOOLEAN) {
            prefs.setBoolPref(ps.name, value);
        } else if (ps.type == PREF_STRING) {
            prefs.setCharPref(ps.name, value);
            // Quote string values in the log for readability.
            value = '"' + value + '"';
        } else if (ps.type == PREF_INTEGER) {
            prefs.setIntPref(ps.name, value);
        }
        logger.info("RESTORE PREFERENCE pref(" + ps.name + "," + value + ")");
    }
    gPrefsToRestore = [];
}
+
// Register the chrome-side handler for every reftest:* message sent by the
// content script, then inject reftest-content.js into the browser.
function RegisterMessageListenersAndLoadContentScript()
{
    // Message name (without the "reftest:" prefix) -> handler.  Return
    // values are forwarded so the synchronous messages (ContentReady,
    // InitCanvasWithSnapshot) receive their reply; for the async ones the
    // (undefined) return value is ignored, as before.
    var handlers = {
        "AssertionCount":
            function (m) { return RecvAssertionCount(m.json.count); },
        "ContentReady":
            function (m) { return RecvContentReady(m.data); },
        "Exception":
            function (m) { return RecvException(m.json.what); },
        "FailedLoad":
            function (m) { return RecvFailedLoad(m.json.why); },
        "FailedNoPaint":
            function (m) { return RecvFailedNoPaint(); },
        "FailedOpaqueLayer":
            function (m) { return RecvFailedOpaqueLayer(m.json.why); },
        "FailedAssignedLayer":
            function (m) { return RecvFailedAssignedLayer(m.json.why); },
        "InitCanvasWithSnapshot":
            function (m) { return RecvInitCanvasWithSnapshot(); },
        "Log":
            function (m) { return RecvLog(m.json.type, m.json.msg); },
        "ScriptResults":
            function (m) { return RecvScriptResults(m.json.runtimeMs, m.json.error, m.json.results); },
        "TestDone":
            function (m) { return RecvTestDone(m.json.runtimeMs); },
        "UpdateCanvasForInvalidation":
            function (m) { return RecvUpdateCanvasForInvalidation(m.json.rects); },
        "UpdateWholeCanvasForInvalidation":
            function (m) { return RecvUpdateWholeCanvasForInvalidation(); },
        "ExpectProcessCrash":
            function (m) { return RecvExpectProcessCrash(); }
    };

    for (var name in handlers) {
        gBrowserMessageManager.addMessageListener("reftest:" + name,
                                                  handlers[name]);
    }

    gBrowserMessageManager.loadFrameScript("chrome://reftest/content/reftest-content.js", true, true);
}
+
// Content reported the assertion count for the finished test; run the
// asserts() check and move on to the next test.
function RecvAssertionCount(count)
{
    DoAssertionCheck(count);
}
+
// First message from a freshly loaded content script: record the content
// process gfx info and kick off the test run.  The (sync) reply tells the
// content side whether the browser is remote (e10s).
function RecvContentReady(info)
{
    gContentGfxInfo = info.gfx;
    InitAndStartRefTests();
    return { remote: gBrowserIsRemote };
}
+
// Content hit an unexpected exception; log it against the current URL and
// count it so the run is reported as having errors.
function RecvException(what)
{
    logger.error(gCurrentURL + " | " + what);
    ++gTestResults.Exception;
}
+
// Content could not load the current test or reference page.
function RecvFailedLoad(why)
{
    LoadFailed(why);
}
+
// A reftest-no-paint test painted when it should not have; remembered until
// the result for the current item is recorded.
function RecvFailedNoPaint()
{
    gFailedNoPaint = true;
}
+
// A reftest-opaque-layer check failed in content; collect the message so
// both test and reference failures can be reported together.
function RecvFailedOpaqueLayer(why) {
    gFailedOpaqueLayer = true;
    gFailedOpaqueLayerMessages.push(why);
}
+
// A reftest-assigned-layer check failed in content; collect the message so
// both test and reference failures can be reported together.
function RecvFailedAssignedLayer(why) {
    gFailedAssignedLayer = true;
    gFailedAssignedLayerMessages.push(why);
}
+
// Sync request from content: snapshot the window into the current canvas.
// The reply reports whether anything was actually painted.
function RecvInitCanvasWithSnapshot()
{
    var painted = InitCurrentCanvasWithSnapshot();
    return { painted: painted };
}
+
// Forward a log line from the content process, prefixed so it is
// distinguishable in the harness log.  Only "info" and "warning" types are
// valid; anything else is reported as a harness error.
function RecvLog(type, msg)
{
    msg = "[CONTENT] " + msg;
    if (type == "info") {
        TestBuffer(msg);
        return;
    }
    if (type == "warning") {
        logger.warning(msg);
        return;
    }
    logger.error("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | unknown log type " + type + "\n");
    ++gTestResults.Exception;
}
+
// A script-driven (reftest-wait / load script) test finished; record its
// per-check results.
function RecvScriptResults(runtimeMs, error, results)
{
    RecordResult(runtimeMs, error, results);
}
+
// An ordinary test page finished loading; record the result with no script
// error and no script results.
function RecvTestDone(runtimeMs)
{
    RecordResult(runtimeMs, '', [ ]);
}
+
// Repaint only the invalidated rects of the current canvas.
function RecvUpdateCanvasForInvalidation(rects)
{
    UpdateCurrentCanvasForInvalidation(rects);
}
+
// Repaint the entire current canvas.
function RecvUpdateWholeCanvasForInvalidation()
{
    UpdateWholeCurrentCanvasForInvalidation();
}
+
// Observer for plugin and content-process crashes: remember the minidump
// file names the crash will produce so RemoveExpectedCrashDumpFiles() can
// clean them up afterwards.
function OnProcessCrashed(subject, topic, data)
{
    var id;
    subject = subject.QueryInterface(CI.nsIPropertyBag2);
    // The property carrying the dump id differs between the two topics we
    // observe.
    switch (topic) {
    case "plugin-crashed":
        id = subject.getPropertyAsAString("pluginDumpID");
        break;
    case "ipc:content-shutdown":
        id = subject.getPropertyAsAString("dumpID");
        break;
    }
    if (id) {
        gExpectedCrashDumpFiles.push(id + ".dmp", id + ".extra");
    }
}
+
// Start watching for plugin and content-process crashes so their minidumps
// can be matched against tests that declared they would crash.
function RegisterProcessCrashObservers()
{
    var os = CC[NS_OBSERVER_SERVICE_CONTRACTID]
             .getService(CI.nsIObserverService);
    os.addObserver(OnProcessCrashed, "plugin-crashed", false);
    os.addObserver(OnProcessCrashed, "ipc:content-shutdown", false);
}
+
// The current test is about to crash a process on purpose; don't treat the
// resulting minidumps as unexpected failures.
function RecvExpectProcessCrash()
{
    gExpectingProcessCrash = true;
}
+
// Ask content to navigate to the blank clearing page.
function SendClear()
{
    gBrowserMessageManager.sendAsyncMessage("reftest:Clear");
}
+
// Ask content to run a script test at `uri` with the given timeout (ms).
function SendLoadScriptTest(uri, timeout)
{
    gBrowserMessageManager.sendAsyncMessage("reftest:LoadScriptTest",
                                            { uri: uri, timeout: timeout });
}
+
// Ask content to load a test or reference page of the given type at `uri`
// with the given timeout (ms).
function SendLoadTest(type, uri, timeout)
{
    gBrowserMessageManager.sendAsyncMessage("reftest:LoadTest",
                                            { type: type, uri: uri, timeout: timeout }
                                            );
}
+
// Ask content to reset its rendering state to the defaults.
function SendResetRenderingState()
{
    gBrowserMessageManager.sendAsyncMessage("reftest:ResetRenderingState");
}
diff --git a/layout/tools/reftest/reftest.xul b/layout/tools/reftest/reftest.xul
new file mode 100644
index 000000000..ae5355d20
--- /dev/null
+++ b/layout/tools/reftest/reftest.xul
@@ -0,0 +1,14 @@
<!-- vim: set shiftwidth=4 tabstop=8 autoindent expandtab: -->
<!-- This Source Code Form is subject to the terms of the Mozilla Public
   - License, v. 2.0. If a copy of the MPL was not distributed with this
   - file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<!-- Top-level chrome window for the reftest harness.  The loaded script
   - provides OnRefTestLoad/OnRefTestUnload and drives the whole run. -->
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
        id="reftest-window"
        hidechrome="true"
        onload="OnRefTestLoad();"
        onunload="OnRefTestUnload();"
        style="background:white; overflow:hidden"
        >
    <script type="application/ecmascript" src="reftest.jsm" />
    <!-- The reftest browser element is dynamically created, here -->
</window>
diff --git a/layout/tools/reftest/reftest/__init__.py b/layout/tools/reftest/reftest/__init__.py
new file mode 100644
index 000000000..a0c079dc0
--- /dev/null
+++ b/layout/tools/reftest/reftest/__init__.py
@@ -0,0 +1,132 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+import os
+import re
+
# Matches the start of a trailing (mid-line) comment: whitespace before '#'.
RE_COMMENT = re.compile(r'\s+#')
# Matches HTTP(..) / HTTP(../..) annotations; group 1 is the relative
# directory that must also be packaged.
RE_HTTP = re.compile(r'HTTP\((\.\.(\/\.\.)*)\)')
# Matches URLs with an explicit scheme (e.g. "about:", "data:", "http:").
RE_PROTOCOL = re.compile(r'^\w+:')
# Annotation keywords that may precede a manifest entry.  These are matched
# with str.startswith, so parameterized forms such as fails-if(...) and
# fuzzy-if(...) are covered as well.
FAILURE_TYPES = (
    'fails',
    'fails-if',
    'needs-focus',
    'random',
    'random-if',
    'silentfail',
    'silentfail-if',
    'skip',
    'skip-if',
    'slow',
    'slow-if',
    'fuzzy',
    'fuzzy-if',
    'require-or',
    'asserts',
    'asserts-if',
)
# Preference-setting annotations (pref(...), test-pref(...), ref-pref(...)).
PREF_ITEMS = (
    'pref',
    'test-pref',
    'ref-pref',
)
+
class ReftestManifest(object):
    """Represents a parsed reftest manifest.

    We currently only capture file information because that is the only thing
    tools require.
    """
    def __init__(self, finder=None):
        # Path of the first manifest loaded (the root of the include tree).
        self.path = None
        # Directories containing manifests or referenced files.
        self.dirs = set()
        # Absolute paths of every referenced test/support file.
        self.files = set()
        # Absolute paths of every manifest loaded, including include'd ones.
        self.manifests = set()
        # Set of (test path, manifest path) tuples.
        self.tests = set()
        # Optional finder object used instead of open(); presumably a
        # mozpack FileFinder whose get(path).read() returns raw bytes --
        # TODO confirm against callers.
        self.finder = finder

    def load(self, path):
        """Parse a reftest manifest file.

        Recurses into include'd manifests and accumulates file/dir/test
        information on this instance.
        """
        normalized_path = os.path.normpath(os.path.abspath(path))
        self.manifests.add(normalized_path)
        if not self.path:
            self.path = normalized_path

        mdir = os.path.dirname(normalized_path)
        self.dirs.add(mdir)

        if self.finder:
            lines = self.finder.get(path).read().splitlines()
        else:
            with open(path, 'r') as fh:
                lines = fh.read().splitlines()

        urlprefix = ''
        for line in lines:
            # NOTE(review): decode assumes each line is a byte string
            # (Python 2 str); under Python 3 text mode this would raise --
            # confirm the targeted interpreter.
            line = line.decode('utf-8')

            # Entire line is a comment.
            if line.startswith('#'):
                continue

            # Comments can begin mid line. Strip them.
            m = RE_COMMENT.search(line)
            if m:
                line = line[:m.start()]
            line = line.strip()
            if not line:
                continue

            items = line.split()
            tests = []

            # Skip leading annotations until the directive or test entry is
            # found; each directive terminates the scan with `break`.
            for i in range(len(items)):
                item = items[i]

                if item.startswith(FAILURE_TYPES):
                    continue
                if item.startswith(PREF_ITEMS):
                    continue
                if item == 'HTTP':
                    continue

                m = RE_HTTP.match(item)
                if m:
                    # Need to package the referenced directory.
                    self.dirs.add(os.path.normpath(os.path.join(
                        mdir, m.group(1))))
                    continue

                if item == 'url-prefix':
                    urlprefix = items[i+1]
                    break

                if item == 'default-preferences':
                    break

                if item == 'include':
                    self.load(os.path.join(mdir, items[i+1]))
                    break

                if item == 'load' or item == 'script':
                    tests.append(items[i+1])
                    break

                if item == '==' or item == '!=':
                    tests.extend(items[i+1:i+3])
                    break

            for f in tests:
                # We can't package about: or data: URIs.
                # Discarding data isn't correct for a parser. But retaining
                # all data isn't currently a requirement.
                if RE_PROTOCOL.match(f):
                    continue

                test = os.path.normpath(os.path.join(mdir, urlprefix + f))
                self.files.add(test)
                self.dirs.add(os.path.dirname(test))
                self.tests.add((test, normalized_path))
diff --git a/layout/tools/reftest/reftestcommandline.py b/layout/tools/reftest/reftestcommandline.py
new file mode 100644
index 000000000..da76fbd9a
--- /dev/null
+++ b/layout/tools/reftest/reftestcommandline.py
@@ -0,0 +1,749 @@
+import argparse
+import os
+import sys
+from collections import OrderedDict
+from urlparse import urlparse
+
+import mozlog
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
class ReftestArgumentsParser(argparse.ArgumentParser):
    """Command-line parser shared by every reftest harness front end.

    Declares the options common to desktop, B2G and remote (device) runs.
    Subclasses add platform-specific arguments and extend validate().
    """

    def __init__(self, **kwargs):
        super(ReftestArgumentsParser, self).__init__(**kwargs)

        # Try to import a MozbuildObject. Success indicates that we are
        # running from a source tree. This allows some defaults to be set
        # from the source tree.
        try:
            from mozbuild.base import MozbuildObject
            self.build_obj = MozbuildObject.from_environment(cwd=here)
        except ImportError:
            self.build_obj = None

        self.add_argument("--xre-path",
                          action="store",
                          type=str,
                          dest="xrePath",
                          # individual scripts will set a sane default
                          default=None,
                          help="absolute path to directory containing XRE (probably xulrunner)")

        self.add_argument("--symbols-path",
                          action="store",
                          type=str,
                          dest="symbolsPath",
                          default=None,
                          help="absolute path to directory containing breakpad symbols, or the URL of a zip file containing symbols")

        self.add_argument("--debugger",
                          action="store",
                          dest="debugger",
                          help="use the given debugger to launch the application")

        self.add_argument("--debugger-args",
                          action="store",
                          dest="debuggerArgs",
                          help="pass the given args to the debugger _before_ "
                          "the application on the command line")

        self.add_argument("--debugger-interactive",
                          action="store_true",
                          dest="debuggerInteractive",
                          help="prevents the test harness from redirecting "
                          "stdout and stderr for interactive debuggers")

        self.add_argument("--appname",
                          action="store",
                          type=str,
                          dest="app",
                          default=None,
                          help="absolute path to application, overriding default")

        self.add_argument("--extra-profile-file",
                          action="append",
                          dest="extraProfileFiles",
                          default=[],
                          help="copy specified files/dirs to testing profile")

        self.add_argument("--timeout",
                          action="store",
                          dest="timeout",
                          type=int,
                          default=5 * 60,  # 5 minutes per bug 479518
                          help="reftest will timeout in specified number of seconds. [default %(default)s].")

        self.add_argument("--leak-threshold",
                          action="store",
                          type=int,
                          dest="defaultLeakThreshold",
                          default=0,
                          help="fail if the number of bytes leaked in default "
                          "processes through refcounted objects (or bytes "
                          "in classes with MOZ_COUNT_CTOR and MOZ_COUNT_DTOR) "
                          "is greater than the given number")

        self.add_argument("--utility-path",
                          action="store",
                          type=str,
                          dest="utilityPath",
                          default=self.build_obj.bindir if self.build_obj else None,
                          help="absolute path to directory containing utility "
                          "programs (xpcshell, ssltunnel, certutil)")

        self.add_argument("--total-chunks",
                          type=int,
                          dest="totalChunks",
                          help="how many chunks to split the tests up into")

        self.add_argument("--this-chunk",
                          type=int,
                          dest="thisChunk",
                          help="which chunk to run between 1 and --total-chunks")

        self.add_argument("--log-file",
                          action="store",
                          type=str,
                          dest="logFile",
                          default=None,
                          help="file to log output to in addition to stdout")

        self.add_argument("--skip-slow-tests",
                          dest="skipSlowTests",
                          action="store_true",
                          default=False,
                          help="skip tests marked as slow when running")

        self.add_argument("--ignore-window-size",
                          dest="ignoreWindowSize",
                          action="store_true",
                          default=False,
                          help="ignore the window size, which may cause spurious failures and passes")

        self.add_argument("--install-extension",
                          action="append",
                          dest="extensionsToInstall",
                          default=[],
                          help="install the specified extension in the testing profile. "
                          "The extension file's name should be <id>.xpi where <id> is "
                          "the extension's id as indicated in its install.rdf. "
                          "An optional path can be specified too.")

        self.add_argument("--marionette",
                          default=None,
                          help="host:port to use when connecting to Marionette")

        self.add_argument("--marionette-port-timeout",
                          default=None,
                          help=argparse.SUPPRESS)

        self.add_argument("--marionette-socket-timeout",
                          default=None,
                          help=argparse.SUPPRESS)

        self.add_argument("--marionette-startup-timeout",
                          default=None,
                          help=argparse.SUPPRESS)

        self.add_argument("--setenv",
                          action="append",
                          type=str,
                          default=[],
                          dest="environment",
                          metavar="NAME=VALUE",
                          help="sets the given variable in the application's "
                          "environment")

        self.add_argument("--filter",
                          action="store",
                          type=str,
                          dest="filter",
                          help="specifies a regular expression (as could be passed to the JS "
                          "RegExp constructor) to test against URLs in the reftest manifest; "
                          "only test items that have a matching test URL will be run.")

        self.add_argument("--shuffle",
                          action="store_true",
                          default=False,
                          dest="shuffle",
                          help="run reftests in random order")

        self.add_argument("--run-until-failure",
                          action="store_true",
                          default=False,
                          dest="runUntilFailure",
                          help="stop running on the first failure. Useful for RR recordings.")

        self.add_argument("--repeat",
                          action="store",
                          type=int,
                          default=0,
                          dest="repeat",
                          help="number of times the select test(s) will be executed. Useful for "
                          "finding intermittent failures.")

        self.add_argument("--focus-filter-mode",
                          action="store",
                          type=str,
                          dest="focusFilterMode",
                          default="all",
                          help="filters tests to run by whether they require focus. "
                          "Valid values are `all', `needs-focus', or `non-needs-focus'. "
                          "Defaults to `all'.")

        self.add_argument("--disable-e10s",
                          action="store_false",
                          default=True,
                          dest="e10s",
                          help="disables content processes")

        self.add_argument("--setpref",
                          action="append",
                          type=str,
                          default=[],
                          dest="extraPrefs",
                          metavar="PREF=VALUE",
                          help="defines an extra user preference")

        self.add_argument("--reftest-extension-path",
                          action="store",
                          dest="reftestExtensionPath",
                          help="Path to the reftest extension")

        self.add_argument("--special-powers-extension-path",
                          action="store",
                          dest="specialPowersExtensionPath",
                          help="Path to the special powers extension")

        self.add_argument("--suite",
                          choices=["reftest", "crashtest", "jstestbrowser"],
                          default=None,
                          help=argparse.SUPPRESS)

        self.add_argument("--cleanup-crashes",
                          action = "store_true",
                          dest = "cleanupCrashes",
                          default = False,
                          help = "Delete pending crash reports before running tests.")

        self.add_argument("tests",
                          metavar="TEST_PATH",
                          nargs="*",
                          help="Path to test file, manifest file, or directory containing tests")

        mozlog.commandline.add_logging_group(self)

    def get_ip(self):
        """Return the local IP address (POSIX only; errors out on Windows)."""
        import moznetwork
        if os.name != "nt":
            return moznetwork.get_ip()
        else:
            self.error(
                "ERROR: you must specify a --remote-webserver=<ip address>\n")

    def set_default_suite(self, options):
        """Infer options.suite from the manifest names in options.tests.

        First looks at the basenames of the supplied paths, then for a known
        manifest file inside each supplied directory; errors out if neither
        identifies a suite.
        """
        manifests = OrderedDict([("reftest.list", "reftest"),
                                 ("crashtests.list", "crashtest"),
                                 ("jstests.list", "jstestbrowser")])

        for test_path in options.tests:
            file_name = os.path.basename(test_path)
            if file_name in manifests:
                options.suite = manifests[file_name]
                return

        for test_path in options.tests:
            for manifest_file, suite in manifests.iteritems():
                if os.path.exists(os.path.join(test_path, manifest_file)):
                    options.suite = suite
                    return

        self.error("Failed to determine test suite; supply --suite to set this explicitly")

    def validate(self, options, reftest):
        """Normalize and sanity-check parsed options.

        `reftest` is the harness object providing getFullPath(); errors are
        reported through self.error() (which exits).
        """
        if not options.tests:
            # Can't just set this in the argument parser because mach will set a default
            self.error("Must supply at least one path to a manifest file, test directory, or test file to run.")

        if options.suite is None:
            self.set_default_suite(options)

        if options.totalChunks is not None and options.thisChunk is None:
            self.error(
                "thisChunk must be specified when totalChunks is specified")

        if options.totalChunks:
            if not 1 <= options.thisChunk <= options.totalChunks:
                self.error("thisChunk must be between 1 and totalChunks")

        if options.logFile:
            options.logFile = reftest.getFullPath(options.logFile)

        if options.xrePath is not None:
            if not os.access(options.xrePath, os.F_OK):
                self.error("--xre-path '%s' not found" % options.xrePath)
            if not os.path.isdir(options.xrePath):
                self.error("--xre-path '%s' is not a directory" %
                           options.xrePath)
            options.xrePath = reftest.getFullPath(options.xrePath)

        if options.reftestExtensionPath is None:
            if self.build_obj is not None:
                reftestExtensionPath = os.path.join(self.build_obj.topobjdir, "_tests",
                                                    "reftest", "reftest")
            else:
                reftestExtensionPath = os.path.join(here, "reftest")
            options.reftestExtensionPath = os.path.normpath(reftestExtensionPath)

        if (options.specialPowersExtensionPath is None and
                options.suite in ["crashtest", "jstestbrowser"]):
            if self.build_obj is not None:
                specialPowersExtensionPath = os.path.join(self.build_obj.topobjdir, "_tests",
                                                          "reftest", "specialpowers")
            else:
                specialPowersExtensionPath = os.path.join(here, "specialpowers")
            options.specialPowersExtensionPath = os.path.normpath(specialPowersExtensionPath)

        options.leakThresholds = {
            "default": options.defaultLeakThreshold,
            "tab": 5000,  # See dependencies of bug 1051230.
        }
+
+
class DesktopArgumentsParser(ReftestArgumentsParser):
    """Argument parser for desktop (local Firefox) reftest runs."""

    def __init__(self, **kwargs):
        super(DesktopArgumentsParser, self).__init__(**kwargs)

        self.add_argument("--run-tests-in-parallel",
                          action="store_true",
                          default=False,
                          dest="runTestsInParallel",
                          help="run tests in parallel if possible")

    def _prefs_gpu(self):
        # NOTE(review): `mozinfo` is not imported in this module, so calling
        # this would raise NameError -- confirm whether any caller reaches it.
        if mozinfo.os != "win":
            return ["layers.acceleration.force-enabled=true"]
        return []

    def validate(self, options, reftest):
        """Extend base validation with parallel-run and desktop app checks."""
        super(DesktopArgumentsParser, self).validate(options, reftest)

        # Parallel runs are incompatible with single-logfile/chunked/focus
        # filtered/debugged configurations.
        if options.runTestsInParallel:
            if options.logFile is not None:
                self.error("cannot specify logfile with parallel tests")
            if options.totalChunks is not None or options.thisChunk is not None:
                self.error(
                    "cannot specify thisChunk or totalChunks with parallel tests")
            if options.focusFilterMode != "all":
                self.error("cannot specify focusFilterMode with parallel tests")
            if options.debugger is not None:
                self.error("cannot specify a debugger with parallel tests")

        if options.debugger:
            # valgrind and some debuggers may cause Gecko to start slowly. Make sure
            # marionette waits long enough to connect.
            options.marionette_port_timeout = 900
            options.marionette_socket_timeout = 540

        if not options.tests:
            self.error("No test files specified.")

        if options.app is None:
            # Default to the built binary when running from a (non-Android)
            # source tree.
            bin_dir = (self.build_obj.get_binary_path() if
                       self.build_obj and self.build_obj.substs[
                           'MOZ_BUILD_APP'] != 'mobile/android'
                       else None)

            if bin_dir:
                options.app = bin_dir

        # A scheme shorter than two chars is treated as a Windows drive
        # letter rather than a URL scheme.
        if options.symbolsPath and len(urlparse(options.symbolsPath).scheme) < 2:
            options.symbolsPath = reftest.getFullPath(options.symbolsPath)

        options.utilityPath = reftest.getFullPath(options.utilityPath)
+
+
class B2GArgumentParser(ReftestArgumentsParser):
    """Argument parser for B2G (emulator/device/mulet) reftest runs."""

    def __init__(self, **kwargs):
        super(B2GArgumentParser, self).__init__(**kwargs)

        self.add_argument("--browser-arg",
                          action="store",
                          type=str,
                          dest="browser_arg",
                          help="Optional command-line arg to pass to the browser")

        self.add_argument("--b2gpath",
                          action="store",
                          type=str,
                          dest="b2gPath",
                          help="path to B2G repo or qemu dir")

        self.add_argument("--emulator",
                          action="store",
                          type=str,
                          dest="emulator",
                          help="Architecture of emulator to use: x86 or arm")

        self.add_argument("--emulator-res",
                          action="store",
                          type=str,
                          dest="emulator_res",
                          help="Emulator resolution of the format '<width>x<height>'")

        self.add_argument("--no-window",
                          action="store_true",
                          dest="noWindow",
                          default=False,
                          help="Pass --no-window to the emulator")

        self.add_argument("--adbpath",
                          action="store",
                          type=str,
                          dest="adb_path",
                          default="adb",
                          help="path to adb")

        self.add_argument("--deviceIP",
                          action="store",
                          type=str,
                          dest="deviceIP",
                          help="ip address of remote device to test")

        self.add_argument("--devicePort",
                          action="store",
                          type=str,
                          dest="devicePort",
                          default="20701",
                          help="port of remote device to test")

        self.add_argument("--remote-logfile",
                          action="store",
                          type=str,
                          dest="remoteLogFile",
                          help="Name of log file on the device relative to the device root. PLEASE ONLY USE A FILENAME.")

        self.add_argument("--remote-webserver",
                          action="store",
                          type=str,
                          dest="remoteWebServer",
                          help="ip address where the remote web server is hosted at")

        # Bug fix: the help strings for --http-port/--ssl-port were copy-pasted
        # from --remote-webserver and wrongly described an ip address.
        self.add_argument("--http-port",
                          action="store",
                          type=str,
                          dest="httpPort",
                          help="http port of the remote web server")

        self.add_argument("--ssl-port",
                          action="store",
                          type=str,
                          dest="sslPort",
                          help="ssl port of the remote web server")

        self.add_argument("--pidfile",
                          action="store",
                          type=str,
                          dest="pidFile",
                          default="",
                          help="name of the pidfile to generate")

        self.add_argument("--gecko-path",
                          action="store",
                          type=str,
                          dest="geckoPath",
                          help="the path to a gecko distribution that should "
                          "be installed on the emulator prior to test")

        self.add_argument("--logdir",
                          action="store",
                          type=str,
                          dest="logdir",
                          help="directory to store log files")

        self.add_argument('--busybox',
                          action='store',
                          type=str,
                          dest='busybox',
                          help="Path to busybox binary to install on device")

        self.add_argument("--httpd-path",
                          action="store",
                          type=str,
                          dest="httpdPath",
                          help="path to the httpd.js file")

        self.add_argument("--profile",
                          action="store",
                          type=str,
                          dest="profile",
                          help="for mulet testing, the path to the "
                          "gaia profile to use")

        self.add_argument("--mulet",
                          action="store_true",
                          dest="mulet",
                          default=False,
                          help="Run the tests on a B2G desktop build")

        self.set_defaults(remoteTestRoot=None,
                          logFile="reftest.log",
                          autorun=True,
                          closeWhenDone=True,
                          testPath="")

    def validate_remote(self, options, automation):
        """Fill in device/webserver defaults and check remote-only options.

        `automation` supplies the device manager and default app/ports.
        Returns the mutated options.
        """
        if not options.app:
            options.app = automation.DEFAULT_APP

        if not options.remoteTestRoot:
            options.remoteTestRoot = automation._devicemanager.deviceRoot + \
                "/reftest"

        options.remoteProfile = options.remoteTestRoot + "/profile"

        productRoot = options.remoteTestRoot + "/" + automation._product
        if options.utilityPath is None:
            options.utilityPath = productRoot + "/bin"

        if not options.httpPort:
            options.httpPort = automation.DEFAULT_HTTP_PORT

        if not options.sslPort:
            options.sslPort = automation.DEFAULT_SSL_PORT

        if options.remoteWebServer is None:
            options.remoteWebServer = self.get_ip()

        options.webServer = options.remoteWebServer

        if options.geckoPath and not options.emulator:
            self.error(
                "You must specify --emulator if you specify --gecko-path")

        if options.logdir and not options.emulator:
            self.error("You must specify --emulator if you specify --logdir")

        if options.remoteLogFile is None:
            options.remoteLogFile = "reftest.log"

        options.localLogName = options.remoteLogFile
        options.remoteLogFile = options.remoteTestRoot + \
            '/' + options.remoteLogFile

        # Ensure that the options.logfile (which the base class uses) is set to
        # the remote setting when running remote. Also, if the user set the
        # log file name there, use that instead of reusing the remotelogfile as
        # above.
        if (options.logFile):
            # If the user specified a local logfile name use that
            options.localLogName = options.logFile
        options.logFile = options.remoteLogFile

        # Only reset the xrePath if it wasn't provided
        if options.xrePath is None:
            options.xrePath = options.utilityPath
        options.xrePath = os.path.abspath(options.xrePath)

        if options.pidFile != "":
            # Record our pid so the caller can clean up after a hang.
            # (Robustness fix: use a context manager so the handle is closed
            # even if the write fails.)
            with open(options.pidFile, 'w') as f:
                f.write("%s" % os.getpid())

        # httpd-path is specified by standard makefile targets and may be specified
        # on the command line to select a particular version of httpd.js. If not
        # specified, try to select the one from from the xre bundle, as
        # required in bug 882932.
        if not options.httpdPath:
            options.httpdPath = os.path.join(options.xrePath, "components")

        return options
+
+
class RemoteArgumentsParser(ReftestArgumentsParser):
    """Argument parser for remote (Fennec-on-device) reftest runs."""

    def __init__(self, **kwargs):
        # Bug fix: forward **kwargs to the base class; they were previously
        # dropped, so keyword arguments such as `description` passed by
        # callers were silently lost.
        super(RemoteArgumentsParser, self).__init__(**kwargs)

        # app, xrePath and utilityPath variables are set in main function
        self.set_defaults(logFile="reftest.log",
                          app="",
                          xrePath="",
                          utilityPath="",
                          localLogName=None)

        self.add_argument("--remote-app-path",
                          action="store",
                          type=str,
                          dest="remoteAppPath",
                          help="Path to remote executable relative to device root using only forward slashes. Either this or app must be specified, but not both.")

        self.add_argument("--adbpath",
                          action="store",
                          type=str,
                          dest="adb_path",
                          default="adb",
                          help="path to adb")

        self.add_argument("--deviceIP",
                          action="store",
                          type=str,
                          dest="deviceIP",
                          help="ip address of remote device to test")

        self.add_argument("--deviceSerial",
                          action="store",
                          type=str,
                          dest="deviceSerial",
                          help="adb serial number of remote device to test")

        self.add_argument("--devicePort",
                          action="store",
                          type=str,
                          default="20701",
                          dest="devicePort",
                          help="port of remote device to test")

        self.add_argument("--remote-product-name",
                          action="store",
                          type=str,
                          dest="remoteProductName",
                          default="fennec",
                          help="Name of product to test - either fennec or firefox, defaults to fennec")

        self.add_argument("--remote-webserver",
                          action="store",
                          type=str,
                          dest="remoteWebServer",
                          help="IP Address of the webserver hosting the reftest content")

        self.add_argument("--http-port",
                          action="store",
                          type=str,
                          dest="httpPort",
                          help="port of the web server for http traffic")

        self.add_argument("--ssl-port",
                          action="store",
                          type=str,
                          dest="sslPort",
                          help="Port for https traffic to the web server")

        self.add_argument("--remote-logfile",
                          action="store",
                          type=str,
                          dest="remoteLogFile",
                          default="reftest.log",
                          help="Name of log file on the device relative to device root. PLEASE USE ONLY A FILENAME.")

        self.add_argument("--pidfile",
                          action="store",
                          type=str,
                          dest="pidFile",
                          default="",
                          help="name of the pidfile to generate")

        self.add_argument("--dm_trans",
                          action="store",
                          type=str,
                          dest="dm_trans",
                          default="sut",
                          help="the transport to use to communicate with device: [adb|sut]; default=sut")

        self.add_argument("--remoteTestRoot",
                          action="store",
                          type=str,
                          dest="remoteTestRoot",
                          help="remote directory to use as test root (eg. /mnt/sdcard/tests or /data/local/tests)")

        self.add_argument("--httpd-path",
                          action="store",
                          type=str,
                          dest="httpdPath",
                          help="path to the httpd.js file")

        self.add_argument("--no-device-info",
                          action="store_false",
                          dest="printDeviceInfo",
                          default=True,
                          help="do not display verbose diagnostics about the remote device")

    def validate_remote(self, options, automation):
        """Fill in device/webserver defaults and check remote-only options.

        `automation` supplies the device manager and default ports.  Returns
        the mutated options.
        """
        # Ensure our defaults are set properly for everything we can infer
        if not options.remoteTestRoot:
            options.remoteTestRoot = automation._devicemanager.deviceRoot + \
                '/reftest'
        options.remoteProfile = options.remoteTestRoot + "/profile"

        if options.remoteWebServer is None:
            options.remoteWebServer = self.get_ip()

        # Verify that our remotewebserver is set properly
        if options.remoteWebServer == '127.0.0.1':
            # Bug fix: ArgumentParser.error() accepts a single message
            # argument; the original call passed two strings, which raised
            # TypeError instead of printing this diagnostic.
            self.error("ERROR: Either you specified the loopback for the remote webserver or "
                       "your local IP cannot be detected. Please provide the local ip in --remote-webserver")

        if not options.httpPort:
            options.httpPort = automation.DEFAULT_HTTP_PORT

        if not options.sslPort:
            options.sslPort = automation.DEFAULT_SSL_PORT

        # One of remoteAppPath (relative path to application) or the app (executable) must be
        # set, but not both. If both are set, we destroy the user's selection for app
        # so instead of silently destroying a user specificied setting, we
        # error.
        if options.remoteAppPath and options.app:
            self.error(
                "ERROR: You cannot specify both the remoteAppPath and the app")
        elif options.remoteAppPath:
            options.app = options.remoteTestRoot + "/" + options.remoteAppPath
        elif options.app is None:
            # Neither remoteAppPath nor app are set -- error
            self.error("ERROR: You must specify either appPath or app")

        if options.xrePath is None:
            self.error(
                "ERROR: You must specify the path to the controller xre directory")
        else:
            # Ensure xrepath is a full path
            options.xrePath = os.path.abspath(options.xrePath)

        options.localLogName = options.remoteLogFile
        options.remoteLogFile = options.remoteTestRoot + \
            '/' + options.remoteLogFile

        # Ensure that the options.logfile (which the base class uses) is set to
        # the remote setting when running remote. Also, if the user set the
        # log file name there, use that instead of reusing the remotelogfile as
        # above.
        if options.logFile:
            # If the user specified a local logfile name use that
            options.localLogName = options.logFile

        options.logFile = options.remoteLogFile

        if options.pidFile != "":
            # Record our pid so the caller can clean up after a hang.
            with open(options.pidFile, 'w') as f:
                f.write(str(os.getpid()))

        # httpd-path is specified by standard makefile targets and may be specified
        # on the command line to select a particular version of httpd.js. If not
        # specified, try to select the one from hostutils.zip, as required in
        # bug 882932.
        if not options.httpdPath:
            options.httpdPath = os.path.join(options.utilityPath, "components")

        if not options.ignoreWindowSize:
            # Reftests assume a minimum window size; parse it out of the
            # device's "screen" info strings ("width:<n> height:<n>").
            parts = automation._devicemanager.getInfo(
                'screen')['screen'][0].split()
            width = int(parts[0].split(':')[1])
            height = int(parts[1].split(':')[1])
            if (width < 1366 or height < 1050):
                self.error("ERROR: Invalid screen resolution %sx%s, please adjust to 1366x1050 or higher" % (
                    width, height))

        # Disable e10s by default on Android because we don't run Android
        # e10s jobs anywhere yet.
        options.e10s = False
        return options
diff --git a/layout/tools/reftest/remotereftest.py b/layout/tools/reftest/remotereftest.py
new file mode 100644
index 000000000..f871c7586
--- /dev/null
+++ b/layout/tools/reftest/remotereftest.py
@@ -0,0 +1,412 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+import os
+import time
+import tempfile
+import traceback
+import urllib2
+
+import mozdevice
+import mozinfo
+from automation import Automation
+from remoteautomation import RemoteAutomation, fennecLogcatFilters
+
+from output import OutputHandler
+from runreftest import RefTest, ReftestResolver
+import reftestcommandline
+
+# We need to know our current directory so that we can serve our test files from it.
+SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
+
+
+class RemoteReftestResolver(ReftestResolver):
+    """Manifest resolver for remote (device) reftest runs.
+
+    Overrides path resolution so manifests are first looked up relative to
+    this script's directory, and manifest URLs point at the remote web
+    server rather than file:// URLs.
+    """
+
+    def absManifestPath(self, path):
+        """Return a normalized absolute path for ``path``.
+
+        Prefers a location under SCRIPT_DIRECTORY; falls back to the
+        cwd-relative absolute path. Exits the process (status 1) if
+        neither candidate exists on disk.
+        """
+        script_abs_path = os.path.join(SCRIPT_DIRECTORY, path)
+        if os.path.exists(script_abs_path):
+            rv = script_abs_path
+        elif os.path.exists(os.path.abspath(path)):
+            rv = os.path.abspath(path)
+        else:
+            print >> sys.stderr, "Could not find manifest %s" % script_abs_path
+            sys.exit(1)
+        return os.path.normpath(rv)
+
+    def manifestURL(self, options, path):
+        """Return an http:// URL for ``path`` served by the remote web server."""
+        # Dynamically build the reftest URL if possible, beware that args[0] should exist 'inside' the webroot
+        # It's possible for this url to have a leading "..", but reftest.js will fix that up
+        relPath = os.path.relpath(path, SCRIPT_DIRECTORY)
+        return "http://%s:%s/%s" % (options.remoteWebServer, options.httpPort, relPath)
+
+
+class ReftestServer:
+ """ Web server used to serve Reftests, for closer fidelity to the real web.
+ It is virtually identical to the server used in mochitest and will only
+ be used for running reftests remotely.
+ Bug 581257 has been filed to refactor this wrapper around httpd.js into
+ it's own class and use it in both remote and non-remote testing. """
+
+ def __init__(self, automation, options, scriptDir):
+ self.automation = automation
+ self._utilityPath = options.utilityPath
+ self._xrePath = options.xrePath
+ self._profileDir = options.serverProfilePath
+ self.webServer = options.remoteWebServer
+ self.httpPort = options.httpPort
+ self.scriptDir = scriptDir
+ self.pidFile = options.pidFile
+ self._httpdPath = os.path.abspath(options.httpdPath)
+ self.shutdownURL = "http://%(server)s:%(port)s/server/shutdown" % { "server" : self.webServer, "port" : self.httpPort }
+
+ def start(self):
+ "Run the Refest server, returning the process ID of the server."
+
+ env = self.automation.environment(xrePath = self._xrePath)
+ env["XPCOM_DEBUG_BREAK"] = "warn"
+ if self.automation.IS_WIN32:
+ env["PATH"] = env["PATH"] + ";" + self._xrePath
+
+ args = ["-g", self._xrePath,
+ "-v", "170",
+ "-f", os.path.join(self._httpdPath, "httpd.js"),
+ "-e", "const _PROFILE_PATH = '%(profile)s';const _SERVER_PORT = '%(port)s'; const _SERVER_ADDR ='%(server)s';" %
+ {"profile" : self._profileDir.replace('\\', '\\\\'), "port" : self.httpPort, "server" : self.webServer },
+ "-f", os.path.join(self.scriptDir, "server.js")]
+
+ xpcshell = os.path.join(self._utilityPath,
+ "xpcshell" + self.automation.BIN_SUFFIX)
+
+ if not os.access(xpcshell, os.F_OK):
+ raise Exception('xpcshell not found at %s' % xpcshell)
+ if self.automation.elf_arm(xpcshell):
+ raise Exception('xpcshell at %s is an ARM binary; please use '
+ 'the --utility-path argument to specify the path '
+ 'to a desktop version.' % xpcshell)
+
+ self._process = self.automation.Process([xpcshell] + args, env = env)
+ pid = self._process.pid
+ if pid < 0:
+ print "TEST-UNEXPECTED-FAIL | remotereftests.py | Error starting server."
+ return 2
+ self.automation.log.info("INFO | remotereftests.py | Server pid: %d", pid)
+
+ if (self.pidFile != ""):
+ f = open(self.pidFile + ".xpcshell.pid", 'w')
+ f.write("%s" % pid)
+ f.close()
+
+ def ensureReady(self, timeout):
+ assert timeout >= 0
+
+ aliveFile = os.path.join(self._profileDir, "server_alive.txt")
+ i = 0
+ while i < timeout:
+ if os.path.exists(aliveFile):
+ break
+ time.sleep(1)
+ i += 1
+ else:
+ print "TEST-UNEXPECTED-FAIL | remotereftests.py | Timed out while waiting for server startup."
+ self.stop()
+ return 1
+
+ def stop(self):
+ if hasattr(self, '_process'):
+ try:
+ c = urllib2.urlopen(self.shutdownURL)
+ c.read()
+ c.close()
+
+ rtncode = self._process.poll()
+ if (rtncode == None):
+ self._process.terminate()
+ except:
+ self._process.kill()
+
+class RemoteReftest(RefTest):
+    """Reftest harness driver for remote (Android device) runs.
+
+    Talks to the device through a mozdevice devicemanager and to the
+    browser through RemoteAutomation; serves test files from a local
+    ReftestServer on the host.
+    """
+    # Remote runs drive the browser via the reftest extension, not Marionette.
+    use_marionette = False
+    remoteApp = ''
+    resolver_cls = RemoteReftestResolver
+
+    def __init__(self, automation, devicemanager, options, scriptDir):
+        """Record remote paths/options and wire harness logging into
+        RemoteAutomation's message logger."""
+        RefTest.__init__(self)
+        self.automation = automation
+        self._devicemanager = devicemanager
+        self.scriptDir = scriptDir
+        self.remoteApp = options.app
+        self.remoteProfile = options.remoteProfile
+        self.remoteTestRoot = options.remoteTestRoot
+        self.remoteLogFile = options.remoteLogFile
+        self.localLogName = options.localLogName
+        self.pidFile = options.pidFile
+        # Debug builds start up much more slowly; allow extra time.
+        if self.automation.IS_DEBUG_BUILD:
+            self.SERVER_STARTUP_TIMEOUT = 180
+        else:
+            self.SERVER_STARTUP_TIMEOUT = 90
+        self.automation.deleteANRs()
+        self.automation.deleteTombstones()
+
+        self._populate_logger(options)
+        outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
+        # RemoteAutomation.py's 'messageLogger' is also used by mochitest. Mimic a mochitest
+        # MessageLogger object to re-use this code path.
+        outputHandler.write = outputHandler.__call__
+        self.automation._processArgs['messageLogger'] = outputHandler
+
+    def findPath(self, paths, filename = None):
+        """Return the first entry of ``paths`` that exists on the host
+        (optionally joined with ``filename``), or None."""
+        for path in paths:
+            p = path
+            if filename:
+                p = os.path.join(p, filename)
+            if os.path.exists(self.getFullPath(p)):
+                return path
+        return None
+
+    def startWebServer(self, options):
+        """ Create the webserver on the host and start it up """
+        # options.xrePath/utilityPath are temporarily repointed at host-side
+        # binaries for the server, then restored for the device run below.
+        remoteXrePath = options.xrePath
+        remoteUtilityPath = options.utilityPath
+        localAutomation = Automation()
+        localAutomation.IS_WIN32 = False
+        localAutomation.IS_LINUX = False
+        localAutomation.IS_MAC = False
+        localAutomation.UNIXISH = False
+        hostos = sys.platform
+        if (hostos == 'mac' or hostos == 'darwin'):
+            localAutomation.IS_MAC = True
+        elif (hostos == 'linux' or hostos == 'linux2'):
+            localAutomation.IS_LINUX = True
+            localAutomation.UNIXISH = True
+        elif (hostos == 'win32' or hostos == 'win64'):
+            localAutomation.BIN_SUFFIX = ".exe"
+            localAutomation.IS_WIN32 = True
+
+        paths = [options.xrePath, localAutomation.DIST_BIN, self.automation._product, os.path.join('..', self.automation._product)]
+        options.xrePath = self.findPath(paths)
+        if options.xrePath == None:
+            print "ERROR: unable to find xulrunner path for %s, please specify with --xre-path" % (os.name)
+            return 1
+        paths.append("bin")
+        paths.append(os.path.join("..", "bin"))
+
+        xpcshell = "xpcshell"
+        if (os.name == "nt"):
+            xpcshell += ".exe"
+
+        if (options.utilityPath):
+            paths.insert(0, options.utilityPath)
+        options.utilityPath = self.findPath(paths, xpcshell)
+        if options.utilityPath == None:
+            print "ERROR: unable to find utility path for %s, please specify with --utility-path" % (os.name)
+            return 1
+
+        options.serverProfilePath = tempfile.mkdtemp()
+        self.server = ReftestServer(localAutomation, options, self.scriptDir)
+        retVal = self.server.start()
+        if retVal:
+            return retVal
+        retVal = self.server.ensureReady(self.SERVER_STARTUP_TIMEOUT)
+        if retVal:
+            return retVal
+
+        # Restore the device-side paths clobbered above.
+        options.xrePath = remoteXrePath
+        options.utilityPath = remoteUtilityPath
+        return 0
+
+    def stopWebServer(self, options):
+        """Shut down the host-side web server started by startWebServer."""
+        self.server.stop()
+
+    def createReftestProfile(self, options, manifest):
+        """Build the base reftest profile, add device-specific prefs, and
+        push the resulting profile directory to the device."""
+        profile = RefTest.createReftestProfile(self,
+                                               options,
+                                               manifest,
+                                               server=options.remoteWebServer,
+                                               port=options.httpPort)
+        profileDir = profile.profile
+
+        prefs = {}
+        prefs["app.update.url.android"] = ""
+        prefs["browser.firstrun.show.localepicker"] = False
+        prefs["reftest.remote"] = True
+        prefs["datareporting.policy.dataSubmissionPolicyBypassAcceptance"] = True
+
+        prefs["layout.css.devPixelsPerPx"] = "1.0"
+        # Because Fennec is a little wacky (see bug 1156817) we need to load the
+        # reftest pages at 1.0 zoom, rather than zooming to fit the CSS viewport.
+        prefs["apz.allow_zooming"] = False
+
+        # Set the extra prefs.
+        profile.set_preferences(prefs)
+
+        try:
+            self._devicemanager.pushDir(profileDir, options.remoteProfile)
+            self._devicemanager.chmodDir(options.remoteProfile)
+        except mozdevice.DMError:
+            print "Automation Error: Failed to copy profiledir to device"
+            raise
+
+        return profile
+
+    def copyExtraFilesToProfile(self, options, profile):
+        """Copy extra profile files locally, then re-push the profile dir
+        so the device copy picks them up."""
+        profileDir = profile.profile
+        RefTest.copyExtraFilesToProfile(self, options, profile)
+        try:
+            self._devicemanager.pushDir(profileDir, options.remoteProfile)
+            self._devicemanager.chmodDir(options.remoteProfile)
+        except mozdevice.DMError:
+            print "Automation Error: Failed to copy extra files to device"
+            raise
+
+    def printDeviceInfo(self, printLogcat=False):
+        """Dump device info (and optionally the filtered logcat) to stdout;
+        device errors only produce a warning."""
+        try:
+            if printLogcat:
+                logcat = self._devicemanager.getLogcat(filterOutRegexps=fennecLogcatFilters)
+                print ''.join(logcat)
+            print "Device info:"
+            devinfo = self._devicemanager.getInfo()
+            for category in devinfo:
+                if type(devinfo[category]) is list:
+                    print "  %s:" % category
+                    for item in devinfo[category]:
+                        print "     %s" % item
+                else:
+                    print "  %s: %s" % (category, devinfo[category])
+            print "Test root: %s" % self._devicemanager.deviceRoot
+        except mozdevice.DMError:
+            print "WARNING: Error getting device information"
+
+    def environment(self, **kwargs):
+        """Delegate environment construction to the automation object."""
+        return self.automation.environment(**kwargs)
+
+    def buildBrowserEnv(self, options, profileDir):
+        browserEnv = RefTest.buildBrowserEnv(self, options, profileDir)
+        # remove desktop environment not used on device
+        if "XPCOM_MEM_BLOAT_LOG" in browserEnv:
+            del browserEnv["XPCOM_MEM_BLOAT_LOG"]
+        return browserEnv
+
+    def runApp(self, profile, binary, cmdargs, env,
+               timeout=None, debuggerInfo=None,
+               symbolsPath=None, options=None,
+               valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
+        """Launch the app on the device via automation and return its status.
+        The valgrind* parameters are accepted for interface parity with the
+        desktop harness but are unused here."""
+        status = self.automation.runApp(None, env,
+                                        binary,
+                                        profile.profile,
+                                        cmdargs,
+                                        utilityPath=options.utilityPath,
+                                        xrePath=options.xrePath,
+                                        debuggerInfo=debuggerInfo,
+                                        symbolsPath=symbolsPath,
+                                        timeout=timeout)
+        return status
+
+    def cleanup(self, profileDir):
+        # Pull results back from device
+        if self.remoteLogFile and \
+                self._devicemanager.fileExists(self.remoteLogFile):
+            self._devicemanager.getFile(self.remoteLogFile, self.localLogName)
+        else:
+            print "WARNING: Unable to retrieve log file (%s) from remote " \
+                "device" % self.remoteLogFile
+        self._devicemanager.removeDir(self.remoteProfile)
+        self._devicemanager.removeDir(self.remoteTestRoot)
+        RefTest.cleanup(self, profileDir)
+        if (self.pidFile != ""):
+            try:
+                os.remove(self.pidFile)
+                os.remove(self.pidFile + ".xpcshell.pid")
+            # NOTE(review): bare except also swallows KeyboardInterrupt;
+            # an OSError catch would suffice for the os.remove calls.
+            except:
+                print "Warning: cleaning up pidfile '%s' was unsuccessful from the test harness" % self.pidFile
+
+
+def run_test_harness(parser, options):
+ if options.dm_trans == 'sut' and options.deviceIP == None:
+ print "Error: If --dm_trans = sut, you must provide a device IP to connect to via the --deviceIP option"
+ return 1
+
+ dm_args = {
+ 'deviceRoot': options.remoteTestRoot,
+ 'host': options.deviceIP,
+ 'port': options.devicePort,
+ }
+
+ dm_cls = mozdevice.DroidSUT
+ if options.dm_trans == 'adb':
+ dm_args['adbPath'] = options.adb_path
+ if not dm_args['host']:
+ dm_args['deviceSerial'] = options.deviceSerial
+ dm_cls = mozdevice.DroidADB
+
+ try:
+ dm = dm_cls(**dm_args)
+ except mozdevice.DMError:
+ traceback.print_exc()
+ print "Automation Error: exception while initializing devicemanager. Most likely the device is not in a testable state."
+ return 1
+
+ automation = RemoteAutomation(None)
+ automation.setDeviceManager(dm)
+
+ if options.remoteProductName:
+ automation.setProduct(options.remoteProductName)
+
+ # Set up the defaults and ensure options are set
+ parser.validate_remote(options, automation)
+
+ # Check that Firefox is installed
+ expected = options.app.split('/')[-1]
+ installed = dm.shellCheckOutput(['pm', 'list', 'packages', expected])
+ if expected not in installed:
+ print "%s is not installed on this device" % expected
+ return 1
+
+ automation.setAppName(options.app)
+ automation.setRemoteProfile(options.remoteProfile)
+ automation.setRemoteLog(options.remoteLogFile)
+ reftest = RemoteReftest(automation, dm, options, SCRIPT_DIRECTORY)
+ parser.validate(options, reftest)
+
+ if mozinfo.info['debug']:
+ print "changing timeout for remote debug reftests from %s to 600 seconds" % options.timeout
+ options.timeout = 600
+
+ # Hack in a symbolic link for jsreftest
+ os.system("ln -s ../jsreftest " + str(os.path.join(SCRIPT_DIRECTORY, "jsreftest")))
+
+ # Start the webserver
+ retVal = reftest.startWebServer(options)
+ if retVal:
+ return retVal
+
+ procName = options.app.split('/')[-1]
+ if (dm.processExist(procName)):
+ dm.killProcess(procName)
+
+ if options.printDeviceInfo:
+ reftest.printDeviceInfo()
+
+#an example manifest name to use on the cli
+# manifest = "http://" + options.remoteWebServer + "/reftests/layout/reftests/reftest-sanity/reftest.list"
+ retVal = 0
+ try:
+ dm.recordLogcat()
+ retVal = reftest.runTests(options.tests, options)
+ except:
+ print "Automation Error: Exception caught while running tests"
+ traceback.print_exc()
+ retVal = 1
+
+ reftest.stopWebServer(options)
+
+ if options.printDeviceInfo:
+ reftest.printDeviceInfo(printLogcat=True)
+
+ return retVal
+
+
+if __name__ == "__main__":
+    # Standalone entry point: parse remote-harness arguments and exit with
+    # the harness return code.
+    parser = reftestcommandline.RemoteArgumentsParser()
+    options = parser.parse_args()
+    sys.exit(run_test_harness(parser, options))
diff --git a/layout/tools/reftest/runreftest.py b/layout/tools/reftest/runreftest.py
new file mode 100644
index 000000000..e1c20ccd9
--- /dev/null
+++ b/layout/tools/reftest/runreftest.py
@@ -0,0 +1,747 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Runs the reftest test harness.
+"""
+
+import collections
+import json
+import multiprocessing
+import os
+import platform
+import re
+import shutil
+import signal
+import subprocess
+import sys
+import threading
+
+SCRIPT_DIRECTORY = os.path.abspath(
+ os.path.realpath(os.path.dirname(__file__)))
+if SCRIPT_DIRECTORY not in sys.path:
+ sys.path.insert(0, SCRIPT_DIRECTORY)
+
+import mozcrash
+import mozdebug
+import mozinfo
+import mozleak
+import mozlog
+import mozprocess
+import mozprofile
+import mozrunner
+from mozrunner.utils import get_stack_fixer_function, test_environment
+from mozscreenshot import printstatus, dump_screen
+
+try:
+    from marionette_driver.addons import Addons
+    from marionette_harness import Marionette
+except ImportError, e:
+    # Defer ImportError until attempt to use Marionette
+    # (Python 2 keeps `e` bound after the except block, so the closure
+    # re-raises the original import error at call time.)
+    def reraise(*args, **kwargs):
+        raise(e)
+    Marionette = reraise
+
+from output import OutputHandler, ReftestFormatter
+import reftestcommandline
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+try:
+ from mozbuild.base import MozbuildObject
+ build_obj = MozbuildObject.from_environment(cwd=here)
+except ImportError:
+ build_obj = None
+
+
+def categoriesToRegex(categoryList):
+    """Build a regex fragment matching a parenthesized summary-count list,
+    with one named group per (group-name, label) pair in categoryList."""
+    return "\\(" + ', '.join(["(?P<%s>\\d+) %s" % c for c in categoryList]) + "\\)"
+# Summary categories printed by the harness: (heading, [(regex-group, label)]).
+# Consumed by categoriesToRegex and by the parallel-run aggregation code.
+summaryLines = [('Successful', [('pass', 'pass'), ('loadOnly', 'load only')]),
+                ('Unexpected', [('fail', 'unexpected fail'),
+                                ('pass', 'unexpected pass'),
+                                ('asserts', 'unexpected asserts'),
+                                ('fixedAsserts', 'unexpected fixed asserts'),
+                                ('failedLoad', 'failed load'),
+                                ('exception', 'exception')]),
+                ('Known problems', [('knownFail', 'known fail'),
+                                    ('knownAsserts', 'known asserts'),
+                                    ('random', 'random'),
+                                    ('skipped', 'skipped'),
+                                    ('slow', 'slow')])]
+
+# Python's print is not threadsafe.
+printLock = threading.Lock()
+
+
+class ReftestThread(threading.Thread):
+    """Runs one reftest child process (for --run-tests-in-parallel) and
+    relays its output, holding back per-thread summary lines so the parent
+    can aggregate them across all chunks."""
+
+    def __init__(self, cmdargs):
+        threading.Thread.__init__(self)
+        # Full argv of the child harness process to spawn.
+        self.cmdargs = cmdargs
+        # Maps summary heading -> regex match object (or None if not seen).
+        self.summaryMatches = {}
+        self.retcode = -1
+        for text, _ in summaryLines:
+            self.summaryMatches[text] = None
+
+    def run(self):
+        with printLock:
+            print "Starting thread with", self.cmdargs
+            sys.stdout.flush()
+        process = subprocess.Popen(self.cmdargs, stdout=subprocess.PIPE)
+        for chunk in self.chunkForMergedOutput(process.stdout):
+            # Each chunk is printed atomically under the lock so output from
+            # parallel threads doesn't interleave mid-test.
+            with printLock:
+                print chunk,
+                sys.stdout.flush()
+        self.retcode = process.wait()
+
+    def chunkForMergedOutput(self, logsource):
+        """Gather lines together that should be printed as one atomic unit.
+        Individual test results--anything between 'REFTEST TEST-START' and
+        'REFTEST TEST-END' lines--are an atomic unit.  Lines with data from
+        summaries are parsed and the data stored for later aggregation.
+        Other lines are considered their own atomic units and are permitted
+        to intermix freely."""
+        testStartRegex = re.compile("^REFTEST TEST-START")
+        testEndRegex = re.compile("^REFTEST TEST-END")
+        summaryHeadRegex = re.compile("^REFTEST INFO \\| Result summary:")
+        summaryRegexFormatString = "^REFTEST INFO \\| (?P<message>{text}): (?P<total>\\d+) {regex}"
+        summaryRegexStrings = [summaryRegexFormatString.format(text=text,
+                                                               regex=categoriesToRegex(categories))
+                               for (text, categories) in summaryLines]
+        summaryRegexes = [re.compile(regex) for regex in summaryRegexStrings]
+
+        for line in logsource:
+            if testStartRegex.search(line) is not None:
+                # Consume from the same iterator until TEST-END, then yield
+                # the whole test's output as one string.
+                chunkedLines = [line]
+                for lineToBeChunked in logsource:
+                    chunkedLines.append(lineToBeChunked)
+                    if testEndRegex.search(lineToBeChunked) is not None:
+                        break
+                yield ''.join(chunkedLines)
+                continue
+
+            # Summary lines are captured (not yielded); the parent prints an
+            # aggregated summary after all threads finish.
+            haveSuppressedSummaryLine = False
+            for regex in summaryRegexes:
+                match = regex.search(line)
+                if match is not None:
+                    self.summaryMatches[match.group('message')] = match
+                    haveSuppressedSummaryLine = True
+                    break
+            if haveSuppressedSummaryLine:
+                continue
+
+            if summaryHeadRegex.search(line) is None:
+                yield line
+
+class ReftestResolver(object):
+    """Maps test paths (files, directories, or manifests) onto the manifest
+    URLs plus filter strings that reftest.js consumes."""
+
+    def defaultManifest(self, suite):
+        """Return the conventional manifest filename for ``suite``."""
+        return {"reftest": "reftest.list",
+                "crashtest": "crashtests.list",
+                "jstestbrowser": "jstests.list"}[suite]
+
+    def directoryManifest(self, suite, path):
+        """Return the path of the default manifest inside directory ``path``."""
+        return os.path.join(path, self.defaultManifest(suite))
+
+    def findManifest(self, suite, test_file, subdirs=True):
+        """Return a tuple of (manifest-path, filter-string) for running test_file.
+
+        test_file is a path to a test or a manifest file
+        """
+        rv = []
+        default_manifest = self.defaultManifest(suite)
+        if not os.path.isabs(test_file):
+            test_file = self.absManifestPath(test_file)
+
+        if os.path.isdir(test_file):
+            # Collect every default manifest beneath the directory.
+            for dirpath, dirnames, filenames in os.walk(test_file):
+                if default_manifest in filenames:
+                    rv.append((os.path.join(dirpath, default_manifest), None))
+                    # We keep recursing into subdirectories which means that in the case
+                    # of include directives we get the same manifest multiple times.
+                    # However reftest.js will only read each manifest once
+
+        elif test_file.endswith('.list'):
+            if os.path.exists(test_file):
+                rv = [(test_file, None)]
+        else:
+            # A single test file: walk up the directory tree until a default
+            # manifest is found, building a filter regex for just this test.
+            dirname, pathname = os.path.split(test_file)
+            found = True
+            while not os.path.exists(os.path.join(dirname, default_manifest)):
+                dirname, suffix = os.path.split(dirname)
+                pathname = os.path.join(suffix, pathname)
+                if os.path.dirname(dirname) == dirname:
+                    # Reached the filesystem root without finding a manifest.
+                    found = False
+                    break
+            if found:
+                rv = [(os.path.join(dirname, default_manifest),
+                       r".*(?:/|\\)%s(?:[#?].*)?$" % pathname)]
+
+        return rv
+
+    def absManifestPath(self, path):
+        """Return a normalized absolute form of ``path``."""
+        return os.path.normpath(os.path.abspath(path))
+
+    def manifestURL(self, options, path):
+        """Return a file:// URL for the manifest at ``path``."""
+        return "file://%s" % path
+
+    def resolveManifests(self, options, tests):
+        """Return {manifest-url: filter} for ``tests``; the filter is None
+        when any test selected the whole manifest, otherwise a |-joined
+        regex of the individual test filters."""
+        suite = options.suite
+        manifests = {}
+        for testPath in tests:
+            for manifest, filter_str in self.findManifest(suite, testPath):
+                manifest = self.manifestURL(options, manifest)
+                if manifest not in manifests:
+                    manifests[manifest] = set()
+                manifests[manifest].add(filter_str)
+
+        for key in manifests.iterkeys():
+            if None in manifests[key]:
+                manifests[key] = None
+            else:
+                manifests[key] = "|".join(list(manifests[key]))
+        return manifests
+
+
+class RefTest(object):
+    """Desktop reftest harness: builds the profile, launches the browser,
+    and processes its output.  Subclassed for remote/b2g/mulet runs."""
+    # Desktop runs drive the browser through Marionette.
+    use_marionette = True
+    # cwd at import time; getFullPath resolves user paths relative to it.
+    oldcwd = os.getcwd()
+    resolver_cls = ReftestResolver
+
+    def __init__(self):
+        """Initialize harness state; the logger is created lazily by
+        _populate_logger once options are available."""
+        self.update_mozinfo()
+        # Last test name seen, used in crash/timeout log messages.
+        self.lastTestSeen = 'reftest'
+        self.haveDumpedScreen = False
+        self.resolver = self.resolver_cls()
+        self.log = None
+
+    def _populate_logger(self, options):
+        """Create self.log (idempotent), registering the reftest-specific
+        tbpl formatter before setting up mozlog."""
+        if self.log:
+            return
+
+        mozlog.commandline.log_formatters["tbpl"] = (ReftestFormatter,
+                                                     "Reftest specific formatter for the"
+                                                     "benefit of legacy log parsers and"
+                                                     "tools such as the reftest analyzer")
+        fmt_options = {}
+        # MOZ_REFTEST_VERBOSE forces debug-level tbpl output unless a level
+        # was given explicitly on the command line.
+        if not options.log_tbpl_level and os.environ.get('MOZ_REFTEST_VERBOSE'):
+            options.log_tbpl_level = fmt_options['level'] = 'debug'
+        self.log = mozlog.commandline.setup_logging(
+            "reftest harness", options, {"tbpl": sys.stdout}, fmt_options)
+
+    def update_mozinfo(self):
+        """walk up directories to find mozinfo.json update the info"""
+        # TODO: This should go in a more generic place, e.g. mozinfo
+
+        path = SCRIPT_DIRECTORY
+        dirs = set()
+        # Collect every ancestor directory up to (but not including) $HOME;
+        # the seen-set guards against a cycle if the walk stops moving.
+        while path != os.path.expanduser('~'):
+            if path in dirs:
+                break
+            dirs.add(path)
+            path = os.path.split(path)[0]
+        mozinfo.find_and_update_from_json(*dirs)
+
+    def getFullPath(self, path):
+        "Get an absolute path relative to self.oldcwd."
+        return os.path.normpath(os.path.join(self.oldcwd, os.path.expanduser(path)))
+
+    def createReftestProfile(self, options, manifests, server='localhost', port=0,
+                             profile_to_clone=None):
+        """Sets up a profile for reftest.
+
+        :param options: Object containing command line options
+        :param manifests: Dictionary of the form {manifest_path: [filters]}
+        :param server: Server name to use for http tests
+        :param port: Port the server listens on, for the location whitelist
+        :param profile_to_clone: Path to a profile to use as the basis for the
+                                 test profile
+        """
+
+        locations = mozprofile.permissions.ServerLocations()
+        locations.add_host(server, scheme='http', port=port)
+        locations.add_host(server, scheme='https', port=port)
+
+        # Set preferences for communication between our command line arguments
+        # and the reftest harness.  Preferences that are required for reftest
+        # to work should instead be set in reftest-preferences.js .
+        prefs = {}
+        prefs['reftest.timeout'] = options.timeout * 1000
+        if options.totalChunks:
+            prefs['reftest.totalChunks'] = options.totalChunks
+        if options.thisChunk:
+            prefs['reftest.thisChunk'] = options.thisChunk
+        if options.logFile:
+            prefs['reftest.logFile'] = options.logFile
+        if options.ignoreWindowSize:
+            prefs['reftest.ignoreWindowSize'] = True
+        if options.shuffle:
+            prefs['reftest.shuffle'] = True
+        if options.repeat:
+            prefs['reftest.repeat'] = options.repeat
+        if options.runUntilFailure:
+            prefs['reftest.runUntilFailure'] = True
+        prefs['reftest.focusFilterMode'] = options.focusFilterMode
+        prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
+        # reftest.js reads the manifest dictionary back out of this pref.
+        prefs['reftest.manifests'] = json.dumps(manifests)
+
+        if options.e10s:
+            prefs['browser.tabs.remote.autostart'] = True
+            prefs['extensions.e10sBlocksEnabling'] = False
+
+        # Bug 1262954: For winXP + e10s disable acceleration
+        if platform.system() in ("Windows", "Microsoft") and \
+           '5.1' in platform.version() and options.e10s:
+            prefs['layers.acceleration.disabled'] = True
+
+        # Bug 1300355: Disable canvas cache for win7 as it uses
+        # too much memory and causes OOMs.
+        if platform.system() in ("Windows", "Microsoft") and \
+           '6.1' in platform.version():
+            prefs['reftest.nocache'] = True
+
+        if options.marionette:
+            # options.marionette is "host:port"; only the port is needed here.
+            port = options.marionette.split(':')[1]
+            prefs['marionette.defaultPrefs.port'] = int(port)
+
+        preference_file = os.path.join(here, 'reftest-preferences.js')
+        prefs.update(mozprofile.Preferences.read_prefs(preference_file))
+
+        for v in options.extraPrefs:
+            thispref = v.split('=')
+            if len(thispref) < 2:
+                print "Error: syntax error in --setpref=" + v
+                sys.exit(1)
+            prefs[thispref[0]] = thispref[1].strip()
+
+        addons = []
+        # Without Marionette the reftest extension itself drives the run.
+        if not self.use_marionette:
+            addons.append(options.reftestExtensionPath)
+
+        if options.specialPowersExtensionPath is not None:
+            if not self.use_marionette:
+                addons.append(options.specialPowersExtensionPath)
+            # SpecialPowers requires insecure automation-only features that we
+            # put behind a pref.
+            prefs['security.turn_off_all_security_so_that_viruses_can_take_over_this_computer'] = True
+
+        for pref in prefs:
+            prefs[pref] = mozprofile.Preferences.cast(prefs[pref])
+
+        # Install distributed extensions, if application has any.
+        distExtDir = os.path.join(options.app[:options.app.rfind(os.sep)],
+                                  "distribution", "extensions")
+        if os.path.isdir(distExtDir):
+            for f in os.listdir(distExtDir):
+                addons.append(os.path.join(distExtDir, f))
+
+        # Install custom extensions.
+        for f in options.extensionsToInstall:
+            addons.append(self.getFullPath(f))
+
+        kwargs = {'addons': addons,
+                  'preferences': prefs,
+                  'locations': locations}
+        if profile_to_clone:
+            profile = mozprofile.Profile.clone(profile_to_clone, **kwargs)
+        else:
+            profile = mozprofile.Profile(**kwargs)
+
+        self.copyExtraFilesToProfile(options, profile)
+        return profile
+
+    def environment(self, **kwargs):
+        """Return the browser environment dict, routing mozrunner's
+        test_environment output through our logger."""
+        kwargs['log'] = self.log
+        return test_environment(**kwargs)
+
+    def buildBrowserEnv(self, options, profileDir):
+        """Build the environment for the browser process: base test env,
+        --setenv overrides, and leak-log configuration.  Returns None if a
+        --setenv entry is malformed."""
+        browserEnv = self.environment(
+            xrePath=options.xrePath, debugger=options.debugger)
+        browserEnv["XPCOM_DEBUG_BREAK"] = "stack"
+
+        if mozinfo.info["asan"]:
+            # Disable leak checking for reftests for now
+            if "ASAN_OPTIONS" in browserEnv:
+                browserEnv["ASAN_OPTIONS"] += ":detect_leaks=0"
+            else:
+                browserEnv["ASAN_OPTIONS"] = "detect_leaks=0"
+
+        for v in options.environment:
+            # Each entry must look like NAME=value (NAME non-empty).
+            ix = v.find("=")
+            if ix <= 0:
+                print "Error: syntax error in --setenv=" + v
+                return None
+            browserEnv[v[:ix]] = v[ix + 1:]
+
+        # Enable leaks detection to its own log file.
+        self.leakLogFile = os.path.join(profileDir, "runreftest_leaks.log")
+        browserEnv["XPCOM_MEM_BLOAT_LOG"] = self.leakLogFile
+        return browserEnv
+
+    def killNamedOrphans(self, pname):
+        """ Kill orphan processes matching the given command name """
+        self.log.info("Checking for orphan %s processes..." % pname)
+
+        def _psInfo(line):
+            # Log any ps -f line mentioning the process name, for diagnostics.
+            if pname in line:
+                self.log.info(line)
+        process = mozprocess.ProcessHandler(['ps', '-f'],
+                                            processOutputLine=_psInfo)
+        process.run()
+        process.wait()
+
+        def _psKill(line):
+            # Columns are pid, ppid, comm; ppid == '1' means the process was
+            # reparented to init, i.e. it is an orphan.
+            parts = line.split()
+            if len(parts) == 3 and parts[0].isdigit():
+                pid = int(parts[0])
+                if parts[2] == pname and parts[1] == '1':
+                    self.log.info("killing %s orphan with pid %d" % (pname, pid))
+                    try:
+                        # SIGKILL where available (not on Windows), else SIGTERM.
+                        os.kill(
+                            pid, getattr(signal, "SIGKILL", signal.SIGTERM))
+                    except Exception as e:
+                        self.log.info("Failed to kill process %d: %s" %
+                                      (pid, str(e)))
+        process = mozprocess.ProcessHandler(['ps', '-o', 'pid,ppid,comm'],
+                                            processOutputLine=_psKill)
+        process.run()
+        process.wait()
+
+    def cleanup(self, profileDir):
+        """Remove the temporary profile directory, ignoring errors."""
+        if profileDir:
+            shutil.rmtree(profileDir, True)
+
+ def runTests(self, tests, options, cmdargs=None):
+ cmdargs = cmdargs or []
+ self._populate_logger(options)
+
+ # Despite our efforts to clean up servers started by this script, in practice
+ # we still see infrequent cases where a process is orphaned and interferes
+ # with future tests, typically because the old server is keeping the port in use.
+ # Try to avoid those failures by checking for and killing orphan servers before
+ # trying to start new ones.
+ self.killNamedOrphans('ssltunnel')
+ self.killNamedOrphans('xpcshell')
+
+ if options.cleanupCrashes:
+ mozcrash.cleanup_pending_crash_reports()
+
+ manifests = self.resolver.resolveManifests(options, tests)
+ if options.filter:
+ manifests[""] = options.filter
+
+ if not getattr(options, 'runTestsInParallel', False):
+ return self.runSerialTests(manifests, options, cmdargs)
+
+ cpuCount = multiprocessing.cpu_count()
+
+ # We have the directive, technology, and machine to run multiple test instances.
+ # Experimentation says that reftests are not overly CPU-intensive, so we can run
+ # multiple jobs per CPU core.
+ #
+ # Our Windows machines in automation seem to get upset when we run a lot of
+ # simultaneous tests on them, so tone things down there.
+ if sys.platform == 'win32':
+ jobsWithoutFocus = cpuCount
+ else:
+ jobsWithoutFocus = 2 * cpuCount
+
+ totalJobs = jobsWithoutFocus + 1
+ perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)]
+
+ host = 'localhost'
+ port = 2828
+ if options.marionette:
+ host, port = options.marionette.split(':')
+
+ # First job is only needs-focus tests. Remaining jobs are
+ # non-needs-focus and chunked.
+ perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
+ for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
+ jobArgs[-1:-1] = ["--focus-filter-mode=non-needs-focus",
+ "--total-chunks=%d" % jobsWithoutFocus,
+ "--this-chunk=%d" % chunkNumber,
+ "--marionette=%s:%d" % (host, port)]
+ port += 1
+
+ for jobArgs in perProcessArgs:
+ try:
+ jobArgs.remove("--run-tests-in-parallel")
+ except:
+ pass
+ jobArgs[0:0] = [sys.executable, "-u"]
+
+ threads = [ReftestThread(args) for args in perProcessArgs[1:]]
+ for t in threads:
+ t.start()
+
+ while True:
+ # The test harness in each individual thread will be doing timeout
+ # handling on its own, so we shouldn't need to worry about any of
+ # the threads hanging for arbitrarily long.
+ for t in threads:
+ t.join(10)
+ if not any(t.is_alive() for t in threads):
+ break
+
+ # Run the needs-focus tests serially after the other ones, so we don't
+ # have to worry about races between the needs-focus tests *actually*
+ # needing focus and the dummy windows in the non-needs-focus tests
+ # trying to focus themselves.
+ focusThread = ReftestThread(perProcessArgs[0])
+ focusThread.start()
+ focusThread.join()
+
+ # Output the summaries that the ReftestThread filters suppressed.
+ summaryObjects = [collections.defaultdict(int) for s in summaryLines]
+ for t in threads:
+ for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
+ threadMatches = t.summaryMatches[text]
+ for (attribute, description) in categories:
+ amount = int(
+ threadMatches.group(attribute) if threadMatches else 0)
+ summaryObj[attribute] += amount
+ amount = int(
+ threadMatches.group('total') if threadMatches else 0)
+ summaryObj['total'] += amount
+
+ print 'REFTEST INFO | Result summary:'
+ for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
+ details = ', '.join(["%d %s" % (summaryObj[attribute], description) for (
+ attribute, description) in categories])
+ print 'REFTEST INFO | ' + text + ': ' + str(summaryObj['total']) + ' (' + details + ')'
+
+ return int(any(t.retcode != 0 for t in threads))
+
+ def handleTimeout(self, timeout, proc, utilityPath, debuggerInfo):
+ """handle process output timeout"""
+ # TODO: bug 913975 : _processOutput should call self.processOutputLine
+ # one more time one timeout (I think)
+ self.log.error("%s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout)))
+ self.log.error("Force-terminating active process(es).");
+ self.killAndGetStack(
+ proc, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
+
+    def dumpScreen(self, utilityPath):
+        """Capture a screenshot into the log, at most once per harness run."""
+        if self.haveDumpedScreen:
+            self.log.info("Not taking screenshot here: see the one that was previously logged")
+            return
+        self.haveDumpedScreen = True
+        dump_screen(utilityPath, self.log)
+
+    def killAndGetStack(self, process, utilityPath, debuggerInfo, dump_screen=False):
+        """
+        Kill the process, preferably in a way that gets us a stack trace.
+        Also attempts to obtain a screenshot before killing the process
+        if specified.
+        """
+
+        if dump_screen:
+            self.dumpScreen(utilityPath)
+
+        # With a crash reporter available (and no debugger attached), try to
+        # trigger a crash dump so we get a stack for the hang.
+        if mozinfo.info.get('crashreporter', True) and not debuggerInfo:
+            if mozinfo.isWin:
+                # We should have a "crashinject" program in our utility path
+                crashinject = os.path.normpath(
+                    os.path.join(utilityPath, "crashinject.exe"))
+                if os.path.exists(crashinject):
+                    status = subprocess.Popen(
+                        [crashinject, str(process.pid)]).wait()
+                    printstatus("crashinject", status)
+                    if status == 0:
+                        return
+            else:
+                # SIGABRT makes Breakpad write a minidump before dying.
+                try:
+                    process.kill(sig=signal.SIGABRT)
+                except OSError:
+                    # https://bugzilla.mozilla.org/show_bug.cgi?id=921509
+                    self.log.info("Can't trigger Breakpad, process no longer exists")
+                return
+        self.log.info("Can't trigger Breakpad, just killing process")
+        process.kill()
+
+    def runApp(self, profile, binary, cmdargs, env,
+               timeout=None, debuggerInfo=None,
+               symbolsPath=None, options=None,
+               valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
+        """Launch the browser with the given profile/args and wait for exit.
+
+        Returns the process exit status; a clean exit with a crash dump on
+        disk is converted to status 1.  The valgrind* parameters are part of
+        the shared runApp() signature and are unused here.
+        """
+
+        def timeoutHandler():
+            # Called by mozprocess when no output arrives for `timeout` secs.
+            self.handleTimeout(
+                timeout, proc, options.utilityPath, debuggerInfo)
+
+        interactive = False
+        debug_args = None
+        if debuggerInfo:
+            interactive = debuggerInfo.interactive
+            debug_args = [debuggerInfo.path] + debuggerInfo.args
+
+        def record_last_test(message):
+            """Records the last test seen by this harness for the benefit of crash logging."""
+            if message['action'] == 'test_start':
+                if " " in message['test']:
+                    self.lastTestSeen = message['test'].split(" ")[0]
+                else:
+                    self.lastTestSeen = message['test']
+
+        self.log.add_handler(record_last_test)
+
+        outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=symbolsPath)
+
+        kp_kwargs = {
+            'kill_on_timeout': False,
+            'cwd': SCRIPT_DIRECTORY,
+            'onTimeout': [timeoutHandler],
+            'processOutputLine': [outputHandler],
+        }
+
+        if interactive:
+            # If an interactive debugger is attached,
+            # don't use timeouts, and don't capture ctrl-c.
+            timeout = None
+            signal.signal(signal.SIGINT, lambda sigid, frame: None)
+
+        # Pick the mozrunner class matching the application under test.
+        if mozinfo.info.get('appname') == 'b2g' and mozinfo.info.get('toolkit') != 'gonk':
+            runner_cls = mozrunner.Runner
+        else:
+            runner_cls = mozrunner.runners.get(mozinfo.info.get('appname', 'firefox'),
+                                               mozrunner.Runner)
+        runner = runner_cls(profile=profile,
+                            binary=binary,
+                            process_class=mozprocess.ProcessHandlerMixin,
+                            cmdargs=cmdargs,
+                            env=env,
+                            process_args=kp_kwargs)
+        runner.start(debug_args=debug_args,
+                     interactive=interactive,
+                     outputTimeout=timeout)
+        proc = runner.process_handler
+
+        if self.use_marionette:
+            # Install the reftest (and optionally SpecialPowers) extensions
+            # over Marionette, then release the session so the harness runs.
+            marionette_args = {
+                'socket_timeout': options.marionette_socket_timeout,
+                'startup_timeout': options.marionette_startup_timeout,
+                'symbols_path': options.symbolsPath,
+            }
+            if options.marionette:
+                host, port = options.marionette.split(':')
+                marionette_args['host'] = host
+                marionette_args['port'] = int(port)
+
+            marionette = Marionette(**marionette_args)
+            marionette.start_session(timeout=options.marionette_port_timeout)
+
+            addons = Addons(marionette)
+            if options.specialPowersExtensionPath:
+                addons.install(options.specialPowersExtensionPath, temp=True)
+
+            addons.install(options.reftestExtensionPath, temp=True)
+
+            marionette.delete_session()
+
+        status = runner.wait()
+        runner.process_handler = None
+
+        if status:
+            msg = "TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % \
+                  (self.lastTestSeen, status)
+            # use process_output so message is logged verbatim
+            self.log.process_output(None, msg)
+        else:
+            self.lastTestSeen = 'Main app process exited normally'
+
+        crashed = mozcrash.log_crashes(self.log, os.path.join(profile.profile, 'minidumps'),
+                                       symbolsPath, test=self.lastTestSeen)
+
+        runner.cleanup()
+        # Report failure when the app exited cleanly but left a crash dump.
+        if not status and crashed:
+            status = 1
+        return status
+
+    def runSerialTests(self, manifests, options, cmdargs=None):
+        """Run the given manifests in one browser instance.
+
+        Builds the profile and browser environment, runs the app, then
+        post-processes the leak log.  Returns runApp()'s status; the
+        profile directory is cleaned up even on failure.
+        """
+        debuggerInfo = None
+        if options.debugger:
+            debuggerInfo = mozdebug.get_debugger_info(options.debugger, options.debuggerArgs,
+                                                      options.debuggerInteractive)
+
+        profileDir = None
+        try:
+            if cmdargs is None:
+                cmdargs = []
+
+            if self.use_marionette:
+                cmdargs.append('-marionette')
+
+            profile = self.createReftestProfile(options, manifests)
+            profileDir = profile.profile  # name makes more sense
+
+            # browser environment
+            browserEnv = self.buildBrowserEnv(options, profileDir)
+
+            self.log.info("Running with e10s: {}".format(options.e10s))
+            status = self.runApp(profile,
+                                 binary=options.app,
+                                 cmdargs=cmdargs,
+                                 # give the JS harness 30 seconds to deal with
+                                 # its own timeouts
+                                 env=browserEnv,
+                                 timeout=options.timeout + 30.0,
+                                 symbolsPath=options.symbolsPath,
+                                 options=options,
+                                 debuggerInfo=debuggerInfo)
+            self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
+            mozleak.process_leak_log(self.leakLogFile,
+                                     leak_thresholds=options.leakThresholds,
+                                     stack_fixer=get_stack_fixer_function(options.utilityPath,
+                                                                          options.symbolsPath),
+                                     )
+        finally:
+            self.cleanup(profileDir)
+        return status
+
+    def copyExtraFilesToProfile(self, options, profile):
+        "Copy extra files or dirs specified on the command line to the testing profile."
+        profileDir = profile.profile
+        # Hyphenation dictionaries (*.dic) are collected in a dedicated
+        # subdirectory of the profile.
+        if not os.path.exists(os.path.join(profileDir, "hyphenation")):
+            os.makedirs(os.path.join(profileDir, "hyphenation"))
+        for f in options.extraProfileFiles:
+            abspath = self.getFullPath(f)
+            if os.path.isfile(abspath):
+                if os.path.basename(abspath) == 'user.js':
+                    # Merge user.js prefs instead of overwriting the
+                    # profile's generated user.js.
+                    extra_prefs = mozprofile.Preferences.read_prefs(abspath)
+                    profile.set_preferences(extra_prefs)
+                elif os.path.basename(abspath).endswith('.dic'):
+                    shutil.copy2(abspath, os.path.join(profileDir, "hyphenation"))
+                else:
+                    shutil.copy2(abspath, profileDir)
+            elif os.path.isdir(abspath):
+                dest = os.path.join(profileDir, os.path.basename(abspath))
+                shutil.copytree(abspath, dest)
+            else:
+                # Neither file nor directory: warn and move on.
+                self.log.warning(
+                    "runreftest.py | Failed to copy %s to profile" % abspath)
+                continue
+
+
+def run_test_harness(parser, options):
+    """Validate the parsed options, resolve the application binary and run
+    the desktop reftests.  Returns the harness exit status."""
+    reftest = RefTest()
+    parser.validate(options, reftest)
+
+    # We have to validate options.app here for the case when the mach
+    # command is able to find it after argument parsing. This can happen
+    # when running from a tests.zip.
+    if not options.app:
+        parser.error("could not find the application path, --appname must be specified")
+
+    options.app = reftest.getFullPath(options.app)
+    if not os.path.exists(options.app):
+        parser.error("Error: Path %(app)s doesn't exist. Are you executing "
+                     "$objdir/_tests/reftest/runreftest.py?" % {"app": options.app})
+
+    if options.xrePath is None:
+        # Default the XRE directory to the application's own directory.
+        options.xrePath = os.path.dirname(options.app)
+
+    return reftest.runTests(options.tests, options)
+
+
+if __name__ == "__main__":
+    # Command-line entry point: parse desktop reftest arguments and exit
+    # with the harness status.
+    parser = reftestcommandline.DesktopArgumentsParser()
+    options = parser.parse_args()
+    sys.exit(run_test_harness(parser, options))
diff --git a/layout/tools/reftest/runreftestb2g.py b/layout/tools/reftest/runreftestb2g.py
new file mode 100644
index 000000000..6a6346ece
--- /dev/null
+++ b/layout/tools/reftest/runreftestb2g.py
@@ -0,0 +1,425 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import ConfigParser
+import os
+import sys
+import tempfile
+import traceback
+
+# We need to know our current directory so that we can serve our test files from it.
+here = os.path.abspath(os.path.dirname(__file__))
+if here not in sys.path:
+ sys.path.insert(0, here)
+
+from automation import Automation
+from b2gautomation import B2GRemoteAutomation
+from runreftestmulet import run_test_harness as run_mulet_reftests
+from output import OutputHandler
+from remotereftest import RemoteReftestResolver, ReftestServer
+from runreftest import RefTest
+import reftestcommandline
+
+from marionette_harness import Marionette
+from mozdevice import DeviceManagerADB, DMError
+
+
+class ProfileConfigParser(ConfigParser.RawConfigParser):
+    """Subclass of RawConfigParser that outputs .ini files in the exact
+    format expected for profiles.ini, which is slightly different
+    than the default format.
+    """
+
+    def optionxform(self, optionstr):
+        # Preserve option-name case (the base class lower-cases keys).
+        return optionstr
+
+    def write(self, fp):
+        """Serialize to `fp` with "key=value" lines (no spaces around '=')."""
+        if self._defaults:
+            fp.write("[%s]\n" % ConfigParser.DEFAULTSECT)
+            for (key, value) in self._defaults.items():
+                fp.write("%s=%s\n" % (key, str(value).replace('\n', '\n\t')))
+            fp.write("\n")
+        for section in self._sections:
+            fp.write("[%s]\n" % section)
+            for (key, value) in self._sections[section].items():
+                if key == "__name__":
+                    # Internal RawConfigParser bookkeeping key; never emitted.
+                    continue
+                if (value is not None) or (self._optcre == self.OPTCRE):
+                    key = "=".join((key, str(value).replace('\n', '\n\t')))
+                fp.write("%s\n" % (key))
+            fp.write("\n")
+
+class B2GRemoteReftest(RefTest):
+    """Reftest harness for a remote B2G device or emulator.
+
+    The web server runs on the host; the profile, extensions and logs are
+    pushed to / pulled from the device through a devicemanager instance.
+    """
+
+    _devicemanager = None
+    use_marionette = False
+    localProfile = None
+    remoteApp = ''
+    profile = None
+    resolver_cls = RemoteReftestResolver
+
+    def __init__(self, automation, devicemanager, options, scriptDir):
+        RefTest.__init__(self)
+        self.automation = automation
+        self._devicemanager = devicemanager
+        self.runSSLTunnel = False
+        self.remoteTestRoot = options.remoteTestRoot
+        self.remoteProfile = options.remoteProfile
+        self.automation.setRemoteProfile(self.remoteProfile)
+        self.localLogName = options.localLogName
+        self.remoteLogFile = options.remoteLogFile
+        self.bundlesDir = '/system/b2g/distribution/bundles'
+        self.remoteMozillaPath = '/data/b2g/mozilla'
+        self.remoteProfilesIniPath = os.path.join(self.remoteMozillaPath, 'profiles.ini')
+        self.originalProfilesIni = None
+        self.scriptDir = scriptDir
+        self.SERVER_STARTUP_TIMEOUT = 90
+        if self.automation.IS_DEBUG_BUILD:
+            # Debug builds start up much more slowly.
+            self.SERVER_STARTUP_TIMEOUT = 180
+
+    def cleanup(self, profileDir):
+        """Pull the log off the device and restore the device's state."""
+        # Pull results back from device
+        if (self.remoteLogFile):
+            try:
+                self._devicemanager.getFile(self.remoteLogFile, self.localLogName)
+            except:
+                print "ERROR: We were not able to retrieve the info from %s" % self.remoteLogFile
+                sys.exit(5)
+
+        # Delete any bundled extensions
+        if profileDir:
+            extensionDir = os.path.join(profileDir, 'extensions', 'staged')
+            for filename in os.listdir(extensionDir):
+                try:
+                    self._devicemanager._checkCmd(['shell', 'rm', '-rf',
+                                                   os.path.join(self.bundlesDir, filename)])
+                except DMError:
+                    pass
+
+        # Restore the original profiles.ini.
+        if self.originalProfilesIni:
+            try:
+                if not self.automation._is_emulator:
+                    self.restoreProfilesIni()
+                os.remove(self.originalProfilesIni)
+            except:
+                pass
+
+        if not self.automation._is_emulator:
+            self._devicemanager.removeFile(self.remoteLogFile)
+            self._devicemanager.removeDir(self.remoteProfile)
+            self._devicemanager.removeDir(self.remoteTestRoot)
+
+            # We've restored the original profile, so reboot the device so that
+            # it gets picked up.
+            self.automation.rebootDevice()
+
+        RefTest.cleanup(self, profileDir)
+        if getattr(self, 'pidFile', '') != '':
+            try:
+                os.remove(self.pidFile)
+                os.remove(self.pidFile + ".xpcshell.pid")
+            except:
+                print "Warning: cleaning up pidfile '%s' was unsuccessful from the test harness" % self.pidFile
+
+    def findPath(self, paths, filename = None):
+        """Return the first entry of `paths` that exists on disk (joined
+        with `filename` when given), or None if none do."""
+        for path in paths:
+            p = path
+            if filename:
+                p = os.path.join(p, filename)
+            if os.path.exists(self.getFullPath(p)):
+                return path
+        return None
+
+    def startWebServer(self, options):
+        """ Create the webserver on the host and start it up """
+        # Remember the device-side paths; options.* are temporarily pointed
+        # at host-side binaries while the server is located and started.
+        remoteXrePath = options.xrePath
+        remoteProfilePath = self.remoteProfile
+        remoteUtilityPath = options.utilityPath
+        localAutomation = Automation()
+        localAutomation.IS_WIN32 = False
+        localAutomation.IS_LINUX = False
+        localAutomation.IS_MAC = False
+        localAutomation.UNIXISH = False
+        hostos = sys.platform
+        if hostos in ['mac', 'darwin']:
+            localAutomation.IS_MAC = True
+        elif hostos in ['linux', 'linux2']:
+            localAutomation.IS_LINUX = True
+            localAutomation.UNIXISH = True
+        elif hostos in ['win32', 'win64']:
+            localAutomation.BIN_SUFFIX = ".exe"
+            localAutomation.IS_WIN32 = True
+
+        paths = [options.xrePath,
+                 localAutomation.DIST_BIN,
+                 self.automation._product,
+                 os.path.join('..', self.automation._product)]
+        options.xrePath = self.findPath(paths)
+        if options.xrePath == None:
+            print "ERROR: unable to find xulrunner path for %s, please specify with --xre-path" % (os.name)
+            sys.exit(1)
+        paths.append("bin")
+        paths.append(os.path.join("..", "bin"))
+
+        xpcshell = "xpcshell"
+        if (os.name == "nt"):
+            xpcshell += ".exe"
+
+        options.utilityPath = self.findPath(paths, xpcshell)
+        if options.utilityPath == None:
+            print "ERROR: unable to find utility path for %s, please specify with --utility-path" % (os.name)
+            sys.exit(1)
+
+        xpcshell = os.path.join(options.utilityPath, xpcshell)
+        if self.automation.elf_arm(xpcshell):
+            # The server runs on the host, so a device (ARM) binary is no use.
+            raise Exception('xpcshell at %s is an ARM binary; please use '
+                            'the --utility-path argument to specify the path '
+                            'to a desktop version.' % xpcshell)
+
+        options.serverProfilePath = tempfile.mkdtemp()
+        self.server = ReftestServer(localAutomation, options, self.scriptDir)
+        retVal = self.server.start()
+        if retVal:
+            return retVal
+
+        if (options.pidFile != ""):
+            f = open(options.pidFile + ".xpcshell.pid", 'w')
+            f.write("%s" % self.server._process.pid)
+            f.close()
+
+        retVal = self.server.ensureReady(self.SERVER_STARTUP_TIMEOUT)
+        if retVal:
+            return retVal
+
+        # Restore the device-side paths saved above.
+        options.xrePath = remoteXrePath
+        options.utilityPath = remoteUtilityPath
+        options.profilePath = remoteProfilePath
+        return 0
+
+    def stopWebServer(self, options):
+        # Only stop the server if startWebServer() got far enough to create it.
+        if hasattr(self, 'server'):
+            self.server.stop()
+
+    def restoreProfilesIni(self):
+        # restore profiles.ini on the device to its previous state
+        if not self.originalProfilesIni or not os.access(self.originalProfilesIni, os.F_OK):
+            raise DMError('Unable to install original profiles.ini; file not found: %s',
+                          self.originalProfilesIni)
+
+        self._devicemanager.pushFile(self.originalProfilesIni, self.remoteProfilesIniPath)
+
+    def updateProfilesIni(self, profilePath):
+        # update profiles.ini on the device to point to the test profile
+        self.originalProfilesIni = tempfile.mktemp()
+        self._devicemanager.getFile(self.remoteProfilesIniPath, self.originalProfilesIni)
+
+        config = ProfileConfigParser()
+        config.read(self.originalProfilesIni)
+        for section in config.sections():
+            if 'Profile' in section:
+                # Point every profile section at the absolute test profile.
+                config.set(section, 'IsRelative', 0)
+                config.set(section, 'Path', profilePath)
+
+        newProfilesIni = tempfile.mktemp()
+        with open(newProfilesIni, 'wb') as configfile:
+            config.write(configfile)
+
+        self._devicemanager.pushFile(newProfilesIni, self.remoteProfilesIniPath)
+        try:
+            os.remove(newProfilesIni)
+        except:
+            pass
+
+
+    def createReftestProfile(self, options, manifests):
+        """Build the reftest profile, push it (and staged extensions) to the
+        device, and point the device's profiles.ini at it."""
+        profile = RefTest.createReftestProfile(self, options, manifests,
+                                               server=options.remoteWebServer)
+        profileDir = profile.profile
+
+        prefs = {}
+
+        # Turn off the locale picker screen
+        prefs["browser.firstrun.show.localepicker"] = False
+        prefs["b2g.system_startup_url"] = "app://test-container.gaiamobile.org/index.html"
+        prefs["b2g.system_manifest_url"] = "app://test-container.gaiamobile.org/manifest.webapp"
+        prefs["dom.ipc.tabs.disabled"] = False
+        prefs["dom.mozBrowserFramesEnabled"] = True
+        prefs["font.size.inflation.emPerLine"] = 0
+        prefs["font.size.inflation.minTwips"] = 0
+        prefs["network.dns.localDomains"] = "app://test-container.gaiamobile.org"
+        prefs["reftest.browser.iframe.enabled"] = False
+        prefs["reftest.remote"] = True
+
+        # Make sure we disable system updates
+        prefs["app.update.enabled"] = False
+        prefs["app.update.url"] = ""
+        # Disable webapp updates
+        prefs["webapps.update.enabled"] = False
+        # Disable tiles also
+        prefs["browser.newtabpage.directory.source"] = ""
+        prefs["browser.newtabpage.directory.ping"] = ""
+
+        # Set the extra prefs.
+        profile.set_preferences(prefs)
+
+        # Copy the profile to the device.
+        self._devicemanager.removeDir(self.remoteProfile)
+        try:
+            self._devicemanager.pushDir(profileDir, self.remoteProfile)
+        except DMError:
+            print "Automation Error: Unable to copy profile to device."
+            raise
+
+        # Copy the extensions to the B2G bundles dir.
+        extensionDir = os.path.join(profileDir, 'extensions', 'staged')
+        # need to write to read-only dir
+        self._devicemanager._checkCmd(['remount'])
+        for filename in os.listdir(extensionDir):
+            self._devicemanager._checkCmd(['shell', 'rm', '-rf',
+                                           os.path.join(self.bundlesDir, filename)])
+        try:
+            self._devicemanager.pushDir(extensionDir, self.bundlesDir)
+        except DMError:
+            print "Automation Error: Unable to copy extensions to device."
+            raise
+
+        self.updateProfilesIni(self.remoteProfile)
+
+        options.profilePath = self.remoteProfile
+        return profile
+
+    def copyExtraFilesToProfile(self, options, profile):
+        """Copy extra files into the local profile, then push the whole
+        profile directory to the device."""
+        profileDir = profile.profile
+        RefTest.copyExtraFilesToProfile(self, options, profile)
+        try:
+            self._devicemanager.pushDir(profileDir, options.remoteProfile)
+        except DMError:
+            print "Automation Error: Failed to copy extra files to device"
+            raise
+
+    def environment(self, **kwargs):
+        # Delegate environment construction to the automation layer.
+        return self.automation.environment(**kwargs)
+
+    def runApp(self, profile, binary, cmdargs, env,
+               timeout=None, debuggerInfo=None,
+               symbolsPath=None, options=None,
+               valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
+        """Run the app on the device via the automation layer and return its
+        exit status.  The valgrind* parameters are unused here."""
+        outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
+        status = self.automation.runApp(None, env,
+                                        binary,
+                                        profile.profile,
+                                        cmdargs,
+                                        utilityPath=options.utilityPath,
+                                        xrePath=options.xrePath,
+                                        debuggerInfo=debuggerInfo,
+                                        symbolsPath=symbolsPath,
+                                        timeout=timeout,
+                                        outputHandler=outputHandler)
+        return status
+
+
+def run_test_harness(parser, options):
+    """Entry point for remote B2G reftests.
+
+    Delegates to the Mulet runner when --mulet is given; otherwise sets up
+    automation + Marionette + devicemanager, starts the host web server and
+    runs the tests on the device.  Returns the harness exit status.
+    """
+    if options.mulet:
+        return run_mulet_reftests(parser, options)
+
+    auto = B2GRemoteAutomation(None, "fennec")
+
+    # create our Marionette instance
+    kwargs = {}
+    if options.emulator:
+        kwargs['emulator'] = options.emulator
+        auto.setEmulator(True)
+        if options.noWindow:
+            kwargs['noWindow'] = True
+    if options.geckoPath:
+        kwargs['gecko_path'] = options.geckoPath
+    if options.logdir:
+        kwargs['logdir'] = options.logdir
+    if options.busybox:
+        kwargs['busybox'] = options.busybox
+    if options.symbolsPath:
+        kwargs['symbols_path'] = options.symbolsPath
+    if options.emulator_res:
+        kwargs['emulator_res'] = options.emulator_res
+    if options.b2gPath:
+        kwargs['homedir'] = options.b2gPath
+    if options.marionette:
+        # --marionette is "host:port".
+        host,port = options.marionette.split(':')
+        kwargs['host'] = host
+        kwargs['port'] = int(port)
+    if options.adb_path:
+        kwargs['adb_path'] = options.adb_path
+    marionette = Marionette(**kwargs)
+    auto.marionette = marionette
+
+    if options.emulator:
+        # The emulator already owns a device manager.
+        dm = marionette.emulator.dm
+    else:
+        # create the DeviceManager
+        kwargs = {'adbPath': options.adb_path,
+                  'deviceRoot': options.remoteTestRoot}
+        if options.deviceIP:
+            kwargs.update({'host': options.deviceIP,
+                           'port': options.devicePort})
+        dm = DeviceManagerADB(**kwargs)
+    auto.setDeviceManager(dm)
+
+    parser.validate_remote(options, auto)
+
+    # TODO fix exception
+    if not options.ignoreWindowSize:
+        # Parse "width:W height:H" from the device's screen info and enforce
+        # the minimum resolution reftests need.
+        parts = dm.getInfo('screen')['screen'][0].split()
+        width = int(parts[0].split(':')[1])
+        height = int(parts[1].split(':')[1])
+        if (width < 1366 or height < 1050):
+            print "ERROR: Invalid screen resolution %sx%s, please adjust to 1366x1050 or higher" % (width, height)
+            return 1
+
+    auto.setProduct("b2g")
+    auto.test_script = os.path.join(here, 'b2g_start_script.js')
+    auto.test_script_args = [options.remoteWebServer, options.httpPort]
+
+    reftest = B2GRemoteReftest(auto, dm, options, here)
+    parser.validate(options, reftest)
+
+    logParent = os.path.dirname(options.remoteLogFile)
+    dm.mkDir(logParent);
+    auto.setRemoteLog(options.remoteLogFile)
+    auto.setServerInfo(options.webServer, options.httpPort, options.sslPort)
+
+    # Hack in a symbolic link for jsreftest
+    os.system("ln -s %s %s" % (os.path.join('..', 'jsreftest'), os.path.join(here, 'jsreftest')))
+
+
+    # Start the webserver
+    retVal = 1
+    try:
+        retVal = reftest.startWebServer(options)
+        if retVal:
+            return retVal
+        # Kill any already-running instance of the app before testing.
+        procName = options.app.split('/')[-1]
+        if (dm.processExist(procName)):
+            dm.killProcess(procName)
+
+        retVal = reftest.runTests(options.tests, options)
+    except:
+        print "Automation Error: Exception caught while running tests"
+        traceback.print_exc()
+        reftest.stopWebServer(options)
+        try:
+            reftest.cleanup(None)
+        except:
+            pass
+        return 1
+
+    reftest.stopWebServer(options)
+    return retVal
+
+
+if __name__ == "__main__":
+    # Command-line entry point: parse B2G reftest arguments and exit with
+    # the harness status.
+    parser = reftestcommandline.B2GArgumentParser()
+    options = parser.parse_args()
+    sys.exit(run_test_harness(parser, options))
diff --git a/layout/tools/reftest/runreftestmulet.py b/layout/tools/reftest/runreftestmulet.py
new file mode 100644
index 000000000..3d9218a29
--- /dev/null
+++ b/layout/tools/reftest/runreftestmulet.py
@@ -0,0 +1,203 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+from __future__ import print_function, unicode_literals
+
+import os
+import signal
+import sys
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+from marionette_driver import expected
+from marionette_driver.by import By
+from marionette_driver.marionette import Marionette
+from marionette_driver.wait import Wait
+
+from mozprocess import ProcessHandler
+from mozrunner import FirefoxRunner
+import mozinfo
+import mozlog
+
+from runreftest import RefTest
+from output import OutputHandler
+import reftestcommandline
+
+
+class MuletReftest(RefTest):
+    """Reftest harness for Mulet (desktop B2G), driven through Marionette."""
+
+    build_type = "mulet"
+    marionette = None
+
+    def __init__(self, marionette_args):
+        RefTest.__init__(self)
+        self.last_test = os.path.basename(__file__)
+        self.marionette_args = marionette_args
+        self.profile = None
+        self.runner = None
+        self.test_script = os.path.join(here, 'b2g_start_script.js')
+        self.timeout = None
+
+    def run_marionette_script(self):
+        """Connect over Marionette, unlock the screen (Mulet only) and
+        inject the in-browser start script."""
+        self.marionette = Marionette(**self.marionette_args)
+        assert(self.marionette.wait_for_port())
+        self.marionette.start_session()
+        if self.build_type == "mulet":
+            # The UI must be up before the lock screen can be dismissed.
+            self._wait_for_homescreen(timeout=300)
+            self._unlockScreen()
+        self.marionette.set_context(self.marionette.CONTEXT_CHROME)
+
+        if os.path.isfile(self.test_script):
+            # self.test_script may hold a path; replace it with the script
+            # source before executing.
+            f = open(self.test_script, 'r')
+            self.test_script = f.read()
+            f.close()
+        self.marionette.execute_script(self.test_script)
+
+    def run_tests(self, tests, options):
+        """Resolve the manifests, launch Mulet, run the reftests and return
+        the application's exit status."""
+        manifests = self.resolver.resolveManifests(options, tests)
+
+        self.profile = self.create_profile(options, manifests,
+                                           profile_to_clone=options.profile)
+        env = self.buildBrowserEnv(options, self.profile.profile)
+
+        self._populate_logger(options)
+        outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=options.symbolsPath)
+
+        kp_kwargs = { 'processOutputLine': [outputHandler],
+                      'onTimeout': [self._on_timeout],
+                      'kill_on_timeout': False }
+
+        if not options.debugger:
+            if not options.timeout:
+                # Debug builds are slower, so allow a longer default timeout.
+                if mozinfo.info['debug']:
+                    options.timeout = 420
+                else:
+                    options.timeout = 300
+            # Give the JS harness 30 extra seconds for its own timeouts.
+            self.timeout = options.timeout + 30.0
+
+        self.log.info("%s | Running tests: start." % os.path.basename(__file__))
+        cmd, args = self.build_command_line(options.app,
+                                            ignore_window_size=options.ignoreWindowSize,
+                                            browser_arg=options.browser_arg)
+        self.runner = FirefoxRunner(profile=self.profile,
+                                    binary=cmd,
+                                    cmdargs=args,
+                                    env=env,
+                                    process_class=ProcessHandler,
+                                    process_args=kp_kwargs,
+                                    symbols_path=options.symbolsPath)
+
+        status = 0
+        try:
+            self.runner.start(outputTimeout=self.timeout)
+            self.log.info("%s | Application pid: %d" % (
+                os.path.basename(__file__),
+                self.runner.process_handler.pid))
+
+            # kick starts the reftest harness
+            self.run_marionette_script()
+            status = self.runner.wait()
+        finally:
+            self.runner.check_for_crashes(test_name=self.last_test)
+            self.runner.cleanup()
+
+        if status > 0:
+            self.log.testFail("%s | application terminated with exit code %s" % (
+                self.last_test, status))
+        elif status < 0:
+            # Negative status means the process died from a signal.
+            self.log.info("%s | application killed with signal %s" % (
+                self.last_test, -status))
+
+        self.log.info("%s | Running tests: end." % os.path.basename(__file__))
+        return status
+
+    def create_profile(self, options, manifests, profile_to_clone=None):
+        """Create the reftest profile with the B2G/Mulet-specific prefs."""
+        profile = RefTest.createReftestProfile(self, options, manifests,
+                                               profile_to_clone=profile_to_clone)
+
+        prefs = {}
+        # Turn off the locale picker screen
+        prefs["browser.firstrun.show.localepicker"] = False
+        if not self.build_type == "mulet":
+            # FIXME: With Mulet we can't set this values since Gaia won't launch
+            prefs["b2g.system_startup_url"] = \
+                "app://test-container.gaiamobile.org/index.html"
+            prefs["b2g.system_manifest_url"] = \
+                "app://test-container.gaiamobile.org/manifest.webapp"
+        # Make sure we disable system updates
+        prefs["app.update.enabled"] = False
+        prefs["app.update.url"] = ""
+        # Disable webapp updates
+        prefs["webapps.update.enabled"] = False
+        # Disable tiles also
+        prefs["browser.newtabpage.directory.source"] = ""
+        prefs["browser.newtabpage.directory.ping"] = ""
+        prefs["dom.ipc.tabs.disabled"] = False
+        prefs["dom.mozBrowserFramesEnabled"] = True
+        prefs["font.size.inflation.emPerLine"] = 0
+        prefs["font.size.inflation.minTwips"] = 0
+        prefs["network.dns.localDomains"] = "app://test-container.gaiamobile.org"
+        prefs["reftest.browser.iframe.enabled"] = False
+        prefs["reftest.remote"] = False
+
+        # Set the extra prefs.
+        profile.set_preferences(prefs)
+        return profile
+
+    def build_command_line(self, app, ignore_window_size=False,
+                           browser_arg=None):
+        """Return (binary, args) used to launch the application."""
+        cmd = os.path.abspath(app)
+        args = ['-marionette']
+
+        if browser_arg:
+            args += [browser_arg]
+
+        if not ignore_window_size:
+            args.extend(['--screen', '800x1000'])
+
+        if self.build_type == "mulet":
+            args += ['-chrome', 'chrome://b2g/content/shell.html']
+        return cmd, args
+
+    def _on_timeout(self):
+        """mozprocess timeout callback: log the hang and abort the app."""
+        msg = "%s | application timed out after %s seconds with no output"
+        self.log.testFail(msg % (self.last_test, self.timeout))
+        self.log.error("Force-terminating active process(es).");
+
+        # kill process to get a stack
+        self.runner.stop(sig=signal.SIGABRT)
+
+    def _unlockScreen(self):
+        """Dismiss the Gaia lock screen via the injected helper script."""
+        self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
+        self.marionette.import_script(os.path.abspath(
+            os.path.join(__file__, os.path.pardir, "gaia_lock_screen.js")))
+        self.marionette.switch_to_frame()
+        self.marionette.execute_async_script('GaiaLockScreen.unlock()')
+
+    def _wait_for_homescreen(self, timeout):
+        """Block until the Gaia home screen reports it has finished loading."""
+        self.log.info("Waiting for home screen to load")
+        Wait(self.marionette, timeout).until(expected.element_present(
+            By.CSS_SELECTOR, '#homescreen[loading-state=false]'))
+
+
+def run_test_harness(parser, options):
+    """Entry point for Mulet reftests: validate options, resolve the binary
+    and run the tests.  Returns the harness exit status."""
+    marionette_args = {}
+    if options.marionette:
+        # --marionette is "host:port".
+        host, port = options.marionette.split(':')
+        marionette_args['host'] = host
+        marionette_args['port'] = int(port)
+
+    reftest = MuletReftest(marionette_args)
+    parser.validate(options, reftest)
+
+    # add a -bin suffix if b2g-bin exists, but just b2g was specified
+    if options.app[-4:] != '-bin':
+        if os.path.isfile("%s-bin" % options.app):
+            options.app = "%s-bin" % options.app
+
+    if options.xrePath is None:
+        # Default the XRE directory to the application's own directory.
+        options.xrePath = os.path.dirname(options.app)
+
+    if options.mulet and not options.profile:
+        raise Exception("must specify --profile when specifying --mulet")
+
+    return reftest.run_tests(options.tests, options)