Diffstat (limited to 'testing/docker')
-rw-r--r--  testing/docker/README.md  145
-rw-r--r--  testing/docker/REGISTRY  1
-rw-r--r--  testing/docker/android-gradle-build/Dockerfile  97
-rw-r--r--  testing/docker/android-gradle-build/README.md  2
-rw-r--r--  testing/docker/android-gradle-build/REGISTRY  1
-rw-r--r--  testing/docker/android-gradle-build/VERSION  1
-rw-r--r--  testing/docker/android-gradle-build/bin/after.sh  45
-rw-r--r--  testing/docker/android-gradle-build/bin/before.sh  21
-rw-r--r--  testing/docker/android-gradle-build/bin/build.sh  29
-rw-r--r--  testing/docker/android-gradle-build/bin/checkout-script.sh  17
-rw-r--r--  testing/docker/android-gradle-build/bin/checkout-sources.sh  55
-rw-r--r--  testing/docker/android-gradle-build/buildprops.json  9
-rw-r--r--  testing/docker/android-gradle-build/dot-config/pip/pip.conf  2
-rw-r--r--  testing/docker/android-gradle-build/oauth.txt  2
-rw-r--r--  testing/docker/base-build/Dockerfile  20
-rw-r--r--  testing/docker/base-build/VERSION  1
-rwxr-xr-x  testing/docker/base-build/system-setup.sh  46
-rw-r--r--  testing/docker/base-test/Dockerfile  138
-rw-r--r--  testing/docker/base-test/REGISTRY  1
-rw-r--r--  testing/docker/base-test/VERSION  1
-rw-r--r--  testing/docker/base-test/sources.list  37
-rw-r--r--  testing/docker/beet-mover/Dockerfile  25
-rw-r--r--  testing/docker/beet-mover/requirements.txt  2
-rw-r--r--  testing/docker/centos6-build-upd/Dockerfile  10
-rw-r--r--  testing/docker/centos6-build-upd/REGISTRY  1
-rw-r--r--  testing/docker/centos6-build-upd/VERSION  1
-rw-r--r--  testing/docker/centos6-build/Dockerfile  32
-rw-r--r--  testing/docker/centos6-build/REGISTRY  1
-rw-r--r--  testing/docker/centos6-build/VERSION  1
-rw-r--r--  testing/docker/centos6-build/hgrc  2
-rw-r--r--  testing/docker/centos6-build/system-setup.sh  477
-rw-r--r--  testing/docker/decision/Dockerfile  28
-rw-r--r--  testing/docker/decision/README.md  5
-rw-r--r--  testing/docker/decision/REGISTRY  1
-rw-r--r--  testing/docker/decision/VERSION  1
-rw-r--r--  testing/docker/decision/system-setup.sh  29
-rw-r--r--  testing/docker/desktop-build/Dockerfile  65
-rw-r--r--  testing/docker/desktop-build/bin/build.sh  36
-rw-r--r--  testing/docker/desktop-build/bin/checkout-script.sh  17
-rw-r--r--  testing/docker/desktop-build/bin/checkout-sources.sh  55
-rw-r--r--  testing/docker/desktop-build/buildprops.json  9
-rw-r--r--  testing/docker/desktop-build/dot-config/pip/pip.conf  2
-rw-r--r--  testing/docker/desktop-build/oauth.txt  2
-rw-r--r--  testing/docker/desktop-test/Dockerfile  108
-rw-r--r--  testing/docker/desktop-test/apport  1
-rw-r--r--  testing/docker/desktop-test/buildprops.json  8
-rw-r--r--  testing/docker/desktop-test/deja-dup-monitor.desktop  19
-rw-r--r--  testing/docker/desktop-test/dot-files/config/pip/pip.conf  2
-rw-r--r--  testing/docker/desktop-test/dot-files/config/user-dirs.dirs  15
-rw-r--r--  testing/docker/desktop-test/dot-files/config/user-dirs.locale  1
-rw-r--r--  testing/docker/desktop-test/dot-files/pulse/default.pa  164
-rw-r--r--  testing/docker/desktop-test/fonts.conf  5
-rw-r--r--  testing/docker/desktop-test/jockey-gtk.desktop  15
-rw-r--r--  testing/docker/desktop-test/motd  6
-rw-r--r--  testing/docker/desktop-test/release-upgrades  17
-rw-r--r--  testing/docker/desktop-test/taskcluster-interactive-shell  22
-rw-r--r--  testing/docker/desktop-test/tc-vcs-config.yml  40
-rw-r--r--  testing/docker/desktop-test/tester.env  4
-rw-r--r--  testing/docker/desktop1604-test/Dockerfile  116
-rw-r--r--  testing/docker/desktop1604-test/apport  1
-rw-r--r--  testing/docker/desktop1604-test/autostart/deja-dup-monitor.desktop  19
-rw-r--r--  testing/docker/desktop1604-test/autostart/gnome-software-service.desktop  9
-rw-r--r--  testing/docker/desktop1604-test/autostart/jockey-gtk.desktop  15
-rwxr-xr-x  testing/docker/desktop1604-test/bin/run-wizard  108
-rw-r--r--  testing/docker/desktop1604-test/buildprops.json  8
-rw-r--r--  testing/docker/desktop1604-test/dot-files/config/pip/pip.conf  2
-rw-r--r--  testing/docker/desktop1604-test/dot-files/config/user-dirs.dirs  15
-rw-r--r--  testing/docker/desktop1604-test/dot-files/config/user-dirs.locale  1
-rw-r--r--  testing/docker/desktop1604-test/dot-files/pulse/default.pa  164
-rw-r--r--  testing/docker/desktop1604-test/fonts.conf  5
-rw-r--r--  testing/docker/desktop1604-test/motd  6
-rw-r--r--  testing/docker/desktop1604-test/release-upgrades  17
-rw-r--r--  testing/docker/desktop1604-test/taskcluster-interactive-shell  10
-rw-r--r--  testing/docker/desktop1604-test/tc-vcs-config.yml  40
-rw-r--r--  testing/docker/desktop1604-test/tester.env  4
-rw-r--r--  testing/docker/firefox-snap/Dockerfile  3
-rw-r--r--  testing/docker/firefox-snap/Makefile  12
-rw-r--r--  testing/docker/firefox-snap/distribution.ini  9
-rwxr-xr-x  testing/docker/firefox-snap/runme.sh  66
-rw-r--r--  testing/docker/firefox-snap/snapcraft.yaml.in  37
-rw-r--r--  testing/docker/funsize-balrog-submitter/Dockerfile  35
-rw-r--r--  testing/docker/funsize-balrog-submitter/Makefile  17
-rw-r--r--  testing/docker/funsize-balrog-submitter/dep.pubkey  9
-rw-r--r--  testing/docker/funsize-balrog-submitter/nightly.pubkey  9
-rw-r--r--  testing/docker/funsize-balrog-submitter/release.pubkey  9
-rw-r--r--  testing/docker/funsize-balrog-submitter/requirements.txt  1
-rw-r--r--  testing/docker/funsize-balrog-submitter/runme.sh  22
-rw-r--r--  testing/docker/funsize-balrog-submitter/scripts/funsize-balrog-submitter.py  209
-rw-r--r--  testing/docker/funsize-update-generator/Dockerfile  35
-rw-r--r--  testing/docker/funsize-update-generator/Makefile  17
-rw-r--r--  testing/docker/funsize-update-generator/dep.pubkey  9
-rw-r--r--  testing/docker/funsize-update-generator/nightly.pubkey  9
-rw-r--r--  testing/docker/funsize-update-generator/release.pubkey  9
-rw-r--r--  testing/docker/funsize-update-generator/requirements.txt  2
-rw-r--r--  testing/docker/funsize-update-generator/runme.sh  25
-rwxr-xr-x  testing/docker/funsize-update-generator/scripts/funsize.py  275
-rwxr-xr-x  testing/docker/funsize-update-generator/scripts/mbsdiff_hook.sh  135
-rw-r--r--  testing/docker/image_builder/Dockerfile  40
-rw-r--r--  testing/docker/image_builder/REGISTRY  1
-rw-r--r--  testing/docker/image_builder/VERSION  1
-rwxr-xr-x  testing/docker/image_builder/build-image.sh  59
-rw-r--r--  testing/docker/image_builder/setup.sh  53
-rw-r--r--  testing/docker/lint/Dockerfile  36
-rw-r--r--  testing/docker/lint/system-setup.sh  69
-rw-r--r--  testing/docker/recipes/centos6-build-system-setup.sh  11
-rw-r--r--  testing/docker/recipes/common.sh  10
-rw-r--r--  testing/docker/recipes/install-mercurial.sh  162
-rwxr-xr-x  testing/docker/recipes/run-task  324
-rwxr-xr-x  testing/docker/recipes/tooltool.py  1022
-rw-r--r--  testing/docker/recipes/ubuntu1204-test-system-setup.sh  279
-rw-r--r--  testing/docker/recipes/ubuntu1604-test-system-setup.sh  180
-rw-r--r--  testing/docker/recipes/xvfb.sh  75
-rw-r--r--  testing/docker/rust-build/Dockerfile  37
-rw-r--r--  testing/docker/rust-build/README.md  2
-rw-r--r--  testing/docker/rust-build/REGISTRY  1
-rw-r--r--  testing/docker/rust-build/VERSION  1
-rw-r--r--  testing/docker/rust-build/build_cargo.sh  20
-rw-r--r--  testing/docker/rust-build/build_rust.sh  26
-rw-r--r--  testing/docker/rust-build/build_rust_mac.sh  36
-rw-r--r--  testing/docker/rust-build/fetch_cargo.sh  21
-rw-r--r--  testing/docker/rust-build/fetch_rust.sh  20
-rw-r--r--  testing/docker/rust-build/package_rust.sh  13
-rw-r--r--  testing/docker/rust-build/repack_rust.py  177
-rw-r--r--  testing/docker/rust-build/task.json  37
-rw-r--r--  testing/docker/rust-build/tcbuild.py  206
-rw-r--r--  testing/docker/rust-build/upload_rust.sh  22
-rw-r--r--  testing/docker/tester/Dockerfile  33
-rw-r--r--  testing/docker/tester/REGISTRY  1
-rw-r--r--  testing/docker/tester/VERSION  1
-rw-r--r--  testing/docker/tester/bin/test.sh  31
-rw-r--r--  testing/docker/tester/dot-config/pip/pip.conf  2
-rw-r--r--  testing/docker/tester/dot-config/user-dirs.dirs  15
-rw-r--r--  testing/docker/tester/dot-config/user-dirs.locale  1
-rw-r--r--  testing/docker/tester/dot-pulse/default.pa  164
-rw-r--r--  testing/docker/tester/tc-vcs-config.yml  40
-rw-r--r--  testing/docker/tester/tester.env  4
-rw-r--r--  testing/docker/upload-symbols/Dockerfile  21
-rw-r--r--  testing/docker/upload-symbols/README.md  28
-rwxr-xr-x  testing/docker/upload-symbols/bin/checkout-script.sh  16
-rwxr-xr-x  testing/docker/upload-symbols/bin/upload.sh  21
-rwxr-xr-x  testing/docker/upload-symbols/test_exports.sh  6
141 files changed, 6719 insertions, 0 deletions
diff --git a/testing/docker/README.md b/testing/docker/README.md
new file mode 100644
index 000000000..ca4a197c1
--- /dev/null
+++ b/testing/docker/README.md
@@ -0,0 +1,145 @@
+# Docker Images for use in TaskCluster
+
+This folder contains various docker images used in [taskcluster](http://docs.taskcluster.net/) as well as other
+miscellaneous docker images that may be useful for hacking on gecko.
+
+## Organization
+
+Each folder describes a single docker image. We have two types of images that can be defined:
+
+1. [Task Images (build-on-push)](#task-images-build-on-push)
+2. [Docker Images (prebuilt)](#docker-registry-images-prebuilt)
+
+These images depend on one another, as described in the [`FROM`](https://docs.docker.com/v1.8/reference/builder/#from)
+line at the top of the Dockerfile in each folder.
+
+An image is either intended for pushing to a docker registry, or meant for local
+testing or to be built as an artifact when pushed to vcs.
+
+### Task Images (build-on-push)
+
+Images can be uploaded as a task artifact, [indexed](#task-image-index-namespace) under
+a given namespace, and used in other tasks by referencing the task ID.
+
+Important to note: these images do not require building and pushing to a docker registry; they are
+built per push (if necessary) and uploaded as task artifacts.
+
+The decision task that is run per push will [determine](#context-directory-hashing)
+if the image needs to be built based on the hash of the context directory and if the image
+exists under the namespace for a given branch.
+
+As an additional convenience, and to avoid rebuilding images per branch, if an image
+has been indexed with a given context hash for mozilla-central, any tasks requiring that image
+will use that indexed task. This ensures that multiple images are not built
+from the same context. In summary, once an image has been built for mozilla-central,
+pushes to any branch will reuse that already-built image.
+
+To use within an in-tree task definition, the format is:
+
+```yaml
+image:
+ type: 'task-image'
+ path: 'public/image.tar.zst'
+ taskId: '{{#task_id_for_image}}builder{{/task_id_for_image}}'
+```
+
+##### Context Directory Hashing
+
+Decision tasks will calculate the sha256 hash of the contents of the image
+directory and will determine if the image already exists for a given branch and hash
+or if a new image must be built and indexed.
+
+Note: this is the contents of *only* the context directory, not the
+image contents.
+
+The decision task will:
+1. Recursively collect the paths of all files within the context directory.
+2. Sort the filenames alphabetically to ensure the hash is consistently calculated.
+3. Generate a sha256 hash of the contents of each file.
+4. Combine each file hash with its path and use the result to update the hash
+of the context directory.
+
+This ensures that the hash is consistently calculated and path changes will result
+in different hashes being generated.
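+
+For illustration, a minimal shell sketch of this scheme (the real logic lives in
+the decision task; the image path and output format here are illustrative):
+
+```sh
+cd testing/docker/your_image
+# Hash each file's contents, pair the hash with the file's path, then hash the
+# sorted, combined stream to get the context hash.
+find . -type f | sort | while read f; do
+  printf '%s %s\n' "$(sha256sum "$f" | cut -d' ' -f1)" "$f"
+done | sha256sum | cut -d' ' -f1
+```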
+
+##### Task Image Index Namespace
+
+Images that are built on push and uploaded as an artifact of a task will be indexed under the
+following namespaces.
+
+* docker.images.v2.level-{level}.{image_name}.latest
+* docker.images.v2.level-{level}.{image_name}.pushdate.{year}.{month}-{day}-{pushtime}
+* docker.images.v2.level-{level}.{image_name}.hash.{context_hash}
+
+Images can be browsed by pushdate and context hash, while the 'latest' namespace
+points at the most recently built image. This functions similarly to the 'latest' tag
+for docker images that are pushed to a registry.
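+
+Indexed images can be located through the TaskCluster index. A sketch, assuming
+the index HTTP API and a hypothetical level-3 `desktop-test` image:
+
+```sh
+curl https://index.taskcluster.net/v1/task/docker.images.v2.level-3.desktop-test.latest
+```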
+
+### Docker Registry Images (prebuilt)
+
+***Deprecation Warning: Prebuilt images should only be used for base images (those that other images
+will inherit from) or for private images that must be stored in a private docker registry account. Existing
+public images will be converted to images that are built on push, and any newly added image should
+follow this pattern.***
+
+These are images that are intended to be pushed to a docker registry and used by specifying the
+folder name in task definitions. This information is automatically populated by using the 'docker_image'
+convenience method in task definitions.
+
+Example:
+
+```
+image: {#docker_image}builder{/docker_image}
+```
+
+Each image has a version, given by its `VERSION` file. This should be bumped when any changes are made that will be deployed into taskcluster.
+Then, older tasks which were designed to run on an older version of the image can still be executed in taskcluster, while new tasks can use the new version.
+
+Each image also has a `REGISTRY`, defaulting to the `REGISTRY` in this directory, and specifying the image registry to which the completed image should be uploaded.
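+
+Conceptually, the tag for an image is assembled from these files; a minimal
+sketch (not the exact `build.sh` code):
+
+```sh
+FOLDER=base
+VERSION=$(cat $FOLDER/VERSION)
+# Use the image's own REGISTRY if present, else the default in this directory.
+REGISTRY=$(cat $FOLDER/REGISTRY 2>/dev/null || cat REGISTRY)
+echo "$REGISTRY/$FOLDER:$VERSION"
+```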
+
+## Building images
+
+Generally, images can be pulled from the [registry](./REGISTRY) rather than
+built locally; however, for developing new images it's often helpful to hack on
+them locally.
+
+To build an image, invoke `build.sh` with the name of the folder (without a trailing slash):
+```sh
+./build.sh base
+```
+
+This is a tiny wrapper around building the docker images via `docker
+build -t $REGISTRY/$FOLDER:$FOLDER_VERSION`.
+
+Note: if no `VERSION` file is present in the image directory, the tag 'latest' will be used and no
+registry user will be defined. The image is then only meant to run locally and will overwrite
+any existing image with the same name and tag.
+
+On completion, if the image has been tagged with a version and registry, `build.sh` gives a
+command to upload the image to the registry, but this is not necessary until the image
+is ready for production usage. Docker will successfully find the local, tagged image
+while you continue to hack on the image definitions.
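+
+For example, a locally built image can then be run for hacking (tag assembled
+from the `VERSION` and `REGISTRY` files as sketched above):
+
+```sh
+docker run -ti --rm $REGISTRY/$FOLDER:$VERSION /bin/bash --login
+```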
+
+## Adding a new image
+
+The docker image primitives are very basic building blocks for
+constructing an "image", but they generally don't help much with tagging it
+for deployment, so we have a wrapper (`./build.sh`) which adds some sugar
+to help with tagging/versioning. Each folder should look something
+like this:
+
+```
+ - your_amazing_image/
+ - your_amazing_image/Dockerfile: Standard docker file syntax
+ - your_amazing_image/VERSION: The version of the docker file
+ (required; used during tagging)
+ - your_amazing_image/REGISTRY: Override default registry
+ (useful for secret registries)
+```
+
+## Conventions
+
+In some image folders you will see `.env` files; these can be used in
+conjunction with the `--env-file` flag in docker to provide an
+environment with the given environment variables. They are primarily
+for convenience when manually hacking on the images.
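+
+For example, to run the tester image with the variables from its `.env` file
+(tag illustrative):
+
+```sh
+docker run --env-file tester/tester.env -ti --rm $REGISTRY/tester:$VERSION /bin/bash --login
+```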
+
+You will also see a `system-setup.sh` script used to build the image.
+Do not replicate this technique - prefer to include the commands and options directly in the Dockerfile.
diff --git a/testing/docker/REGISTRY b/testing/docker/REGISTRY
new file mode 100644
index 000000000..bfdfbe252
--- /dev/null
+++ b/testing/docker/REGISTRY
@@ -0,0 +1 @@
+quay.io/mozilla
diff --git a/testing/docker/android-gradle-build/Dockerfile b/testing/docker/android-gradle-build/Dockerfile
new file mode 100644
index 000000000..b8fb5450f
--- /dev/null
+++ b/testing/docker/android-gradle-build/Dockerfile
@@ -0,0 +1,97 @@
+# TODO remove VOLUME below when the base image is updated next.
+FROM taskcluster/centos6-build-upd:0.1.6.20160329195300
+MAINTAINER Nick Alexander <nalexander@mozilla.com>
+
+# BEGIN ../desktop-build/Dockerfile
+
+# TODO remove when base image is updated
+VOLUME /home/worker/workspace
+VOLUME /home/worker/tooltool-cache
+
+# Add build scripts; these are the entry points from the taskcluster worker, and
+# operate on environment variables
+ADD bin /home/worker/bin
+RUN chmod +x /home/worker/bin/*
+
+# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb
+# %include testing/docker/recipes/xvfb.sh
+ADD topsrcdir/testing/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh
+
+# Add configuration
+COPY dot-config /home/worker/.config
+
+# Generate machine uuid file
+RUN dbus-uuidgen --ensure=/var/lib/dbus/machine-id
+
+# Stubbed out credentials; mozharness looks for this file and issues a WARNING
+# if it's not found, which causes the build to fail. Note that this needs to
+# be in the parent of the workspace directory and in the directory where
+# mozharness is run (not its --work-dir). See Bug 1169652.
+ADD oauth.txt /home/worker/
+
+# stubbed out buildprops, which keeps mozharness from choking
+# Note that this needs to be in the parent of the workspace directory and in
+# the directory where mozharness is run (not its --work-dir)
+ADD buildprops.json /home/worker/
+
+# install tooltool directly from github where tooltool_wrapper.sh et al. expect
+# to find it
+RUN wget -O /builds/tooltool.py https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py
+RUN chmod +x /builds/tooltool.py
+
+# END ../desktop-build/Dockerfile
+
+# Reset user/workdir from parent image so we can install software.
+WORKDIR /
+USER root
+
+# Update base.
+RUN yum upgrade -y
+
+# Install JDK and Sonatype Nexus. Cribbed directly from
+# https://github.com/sonatype/docker-nexus/blob/fffd2c61b2368292040910c055cf690c8e76a272/oss/Dockerfile.
+
+# Install the screen package here to use with xvfb.
+# Move installation to base centos6-build image once Bug 1272629 is fixed
+RUN yum install -y \
+ createrepo \
+ curl \
+ java-1.7.0-openjdk-devel \
+ java-1.7.0-openjdk \
+ screen \
+ sudo \
+ tar \
+ unzip \
+ wget \
+ zip \
+ && yum clean all
+
+ENV NEXUS_VERSION 2.12.0-01
+ENV NEXUS_SHA1SUM 1a9aaad8414baffe0a2fd46eed1f41b85f4049e6
+
+RUN mkdir -p /opt/sonatype/nexus
+
+WORKDIR /tmp
+RUN curl --fail --silent --location --retry 3 \
+ https://download.sonatype.com/nexus/oss/nexus-${NEXUS_VERSION}-bundle.tar.gz \
+ -o /tmp/nexus-${NEXUS_VERSION}-bundle.tar.gz
+
+# Observe the two spaces below. Seriously: `sha1sum --check` expects
+# "<hash>  <filename>", where the second space marks text mode.
+RUN echo "${NEXUS_SHA1SUM} nexus-${NEXUS_VERSION}-bundle.tar.gz" > nexus-${NEXUS_VERSION}-bundle.tar.gz.sha1
+RUN sha1sum --check nexus-${NEXUS_VERSION}-bundle.tar.gz.sha1
+
+RUN tar zxf nexus-${NEXUS_VERSION}-bundle.tar.gz \
+ && mv /tmp/nexus-${NEXUS_VERSION}/* /opt/sonatype/nexus/ \
+ && rm -rf /tmp/nexus-${NEXUS_VERSION} \
+ && rm -rf /tmp/nexus-${NEXUS_VERSION}-bundle.tar.gz
+
+# Install tooltool directly from github.
+RUN mkdir /build
+ADD https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py /build/tooltool.py
+RUN chmod +rx /build/tooltool.py
+
+# Back to the centos6-build workdir, matching desktop-build.
+WORKDIR /home/worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/android-gradle-build/README.md b/testing/docker/android-gradle-build/README.md
new file mode 100644
index 000000000..6096b0836
--- /dev/null
+++ b/testing/docker/android-gradle-build/README.md
@@ -0,0 +1,2 @@
+This is a docker script for fetching Android Gradle dependencies for
+use in Mozilla's build clusters.
diff --git a/testing/docker/android-gradle-build/REGISTRY b/testing/docker/android-gradle-build/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/android-gradle-build/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/android-gradle-build/VERSION b/testing/docker/android-gradle-build/VERSION
new file mode 100644
index 000000000..8acdd82b7
--- /dev/null
+++ b/testing/docker/android-gradle-build/VERSION
@@ -0,0 +1 @@
+0.0.1
diff --git a/testing/docker/android-gradle-build/bin/after.sh b/testing/docker/android-gradle-build/bin/after.sh
new file mode 100644
index 000000000..5fbbfa701
--- /dev/null
+++ b/testing/docker/android-gradle-build/bin/after.sh
@@ -0,0 +1,45 @@
+#!/bin/bash -vex
+
+set -x -e
+
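+# The ":" no-op evaluates its arguments, so ${VAR:=default} below assigns a
+# default only when the variable is unset; with -v the line is also echoed.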
+: WORKSPACE ${WORKSPACE:=/workspace}
+: GRADLE_VERSION ${GRADLE_VERSION:=2.14.1}
+
+set -v
+
+# Package everything up.
+pushd ${WORKSPACE}
+# Not yet. See notes on tooltool below.
+# cp -R /root/.android-sdk android-sdk-linux
+# tar cJf android-sdk-linux.tar.xz android-sdk-linux
+
+cp -R /workspace/nexus/storage/central jcentral
+tar cJf jcentral.tar.xz jcentral
+
+# The Gradle wrapper will have downloaded and verified the hash of exactly one
+# Gradle distribution. It will be located in $GRADLE_USER_HOME, like
+# ~/.gradle/wrapper/dists/gradle-2.7-all/$PROJECT_HASH/gradle-2.7-all.zip. We
+# want to remove the version from the internal directory for use via tooltool in
+# a mozconfig.
+cp $GRADLE_USER_HOME/wrapper/dists/gradle-${GRADLE_VERSION}-all/*/gradle-${GRADLE_VERSION}-all.zip gradle-${GRADLE_VERSION}-all.zip
+unzip -q gradle-${GRADLE_VERSION}-all.zip
+mv gradle-${GRADLE_VERSION} gradle-dist
+tar cJf gradle-dist.tar.xz gradle-dist
+
+mkdir -p /home/worker/artifacts
+# We can't redistribute the Android SDK publicly just yet. We'll
+# upload to (internal) tooltool eventually. mv
+# android-sdk-linux.tar.xz /home/worker/artifacts
+mv jcentral.tar.xz /home/worker/artifacts
+mv gradle-dist.tar.xz /home/worker/artifacts
+popd
+
+# Bug 1245170: at some point in the future, we'll be able to upload
+# things directly to tooltool.
+# pushd /home/worker/artifacts
+# /build/tooltool.py add --visibility=public jcentral.tar.xz
+# /build/tooltool.py add --visibility=public gradle-dist.tar.xz
+# /build/tooltool.py add --visibility=internal android-sdk-linux.tar.xz
+# /build/tooltool.py upload -v --url=http://relengapi/tooltool/ \
+# --message="No message - Gradle and jcentral archives uploaded from taskcluster."
+# popd
diff --git a/testing/docker/android-gradle-build/bin/before.sh b/testing/docker/android-gradle-build/bin/before.sh
new file mode 100644
index 000000000..c8669db01
--- /dev/null
+++ b/testing/docker/android-gradle-build/bin/before.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -vex
+
+set -x -e
+
+: WORKSPACE ${WORKSPACE:=/workspace}
+: GRADLE_VERSION ${GRADLE_VERSION:=2.7}
+
+set -v
+
+# Frowned upon, but simplest.
+RUN_AS_USER=root NEXUS_WORK=${WORKSPACE}/nexus /opt/sonatype/nexus/bin/nexus restart
+
+# Wait "a while" for Nexus to actually start. Don't fail if this fails.
+wget --quiet --retry-connrefused --waitretry=2 --tries=100 \
+ http://localhost:8081/nexus/service/local/status || true
+rm -rf status
+
+# Verify Nexus has actually started. Fail if this fails.
+curl --fail --silent --location http://localhost:8081/nexus/service/local/status | grep '<state>STARTED</state>'
+
+export JAVA_HOME=/usr/lib/jvm/jre-1.7.0-openjdk.x86_64
diff --git a/testing/docker/android-gradle-build/bin/build.sh b/testing/docker/android-gradle-build/bin/build.sh
new file mode 100644
index 000000000..797be39f8
--- /dev/null
+++ b/testing/docker/android-gradle-build/bin/build.sh
@@ -0,0 +1,29 @@
+#! /bin/bash -vex
+
+set -x -e -v
+
+# TODO: when bug 1093833 is solved and tasks can run as non-root, reduce this
+# to a simple fail-if-root check
+if [ $(id -u) = 0 ]; then
+ # each of the caches we have mounted are owned by root, so update that ownership
+ # to 'worker'
+ for cache in /home/worker/.tc-vcs /home/worker/workspace /home/worker/tooltool-cache; do
+ if [ -d $cache ]; then
+ # -R probably isn't necessary forever, but it fixes some poisoned
+ # caches for now
+ chown -R worker:worker $cache
+ fi
+ done
+
+ # ..then drop privileges by re-running this script
+ exec su worker /home/worker/bin/build.sh
+fi
+
+####
+# The default build works for any fx_desktop_build based mozharness job:
+# via linux-build.sh
+####
+
+. $HOME/bin/checkout-sources.sh
+
+. $WORKSPACE/build/src/taskcluster/scripts/builder/build-linux.sh
diff --git a/testing/docker/android-gradle-build/bin/checkout-script.sh b/testing/docker/android-gradle-build/bin/checkout-script.sh
new file mode 100644
index 000000000..2bacf3f01
--- /dev/null
+++ b/testing/docker/android-gradle-build/bin/checkout-script.sh
@@ -0,0 +1,17 @@
+#! /bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
+: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
+
+: SCRIPT_DOWNLOAD_PATH ${SCRIPT_DOWNLOAD_PATH:=$PWD}
+: SCRIPT_PATH ${SCRIPT_PATH:?"script path must be set"}
+set -v
+
+# download script from the gecko repository
+url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/${SCRIPT_PATH}
+wget --directory-prefix=${SCRIPT_DOWNLOAD_PATH} $url
+chmod +x `basename ${SCRIPT_PATH}`
diff --git a/testing/docker/android-gradle-build/bin/checkout-sources.sh b/testing/docker/android-gradle-build/bin/checkout-sources.sh
new file mode 100644
index 000000000..ce5d641d1
--- /dev/null
+++ b/testing/docker/android-gradle-build/bin/checkout-sources.sh
@@ -0,0 +1,55 @@
+#! /bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+# mozharness builds use three repositories: gecko (source), mozharness (build
+# scripts) and tools (miscellaneous) for each, specify *_REPOSITORY. If the
+# revision is not in the standard repo for the codebase, specify *_BASE_REPO as
+# the canonical repo to clone and *_HEAD_REPO as the repo containing the
+# desired revision. For Mercurial clones, only *_HEAD_REV is required; for Git
+# clones, specify the branch name to fetch as *_HEAD_REF and the desired sha1
+# as *_HEAD_REV.
+
+: GECKO_REPOSITORY ${GECKO_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
+: GECKO_BASE_REPOSITORY ${GECKO_BASE_REPOSITORY:=${GECKO_REPOSITORY}}
+: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=${GECKO_REPOSITORY}}
+: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
+: GECKO_HEAD_REF ${GECKO_HEAD_REF:=${GECKO_HEAD_REV}}
+
+: TOOLS_REPOSITORY ${TOOLS_REPOSITORY:=https://hg.mozilla.org/build/tools}
+: TOOLS_BASE_REPOSITORY ${TOOLS_BASE_REPOSITORY:=${TOOLS_REPOSITORY}}
+: TOOLS_HEAD_REPOSITORY ${TOOLS_HEAD_REPOSITORY:=${TOOLS_REPOSITORY}}
+: TOOLS_HEAD_REV ${TOOLS_HEAD_REV:=default}
+: TOOLS_HEAD_REF ${TOOLS_HEAD_REF:=${TOOLS_HEAD_REV}}
+: TOOLS_DISABLE ${TOOLS_DISABLE:=false}
+
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+
+set -v
+
+# check out tools where mozharness expects it to be ($PWD/build/tools and $WORKSPACE/build/tools)
+if [ ! "$TOOLS_DISABLE" = true ]
+then
+ tc-vcs checkout $WORKSPACE/build/tools $TOOLS_BASE_REPOSITORY $TOOLS_HEAD_REPOSITORY $TOOLS_HEAD_REV $TOOLS_HEAD_REF
+
+ if [ ! -d build ]; then
+ mkdir -p build
+ ln -s $WORKSPACE/build/tools build/tools
+ fi
+fi
+
+# TODO - include tools repository in EXTRA_CHECKOUT_REPOSITORIES list
+for extra_repo in $EXTRA_CHECKOUT_REPOSITORIES; do
+ BASE_REPO="${extra_repo}_BASE_REPOSITORY"
+ HEAD_REPO="${extra_repo}_HEAD_REPOSITORY"
+ HEAD_REV="${extra_repo}_HEAD_REV"
+ HEAD_REF="${extra_repo}_HEAD_REF"
+ DEST_DIR="${extra_repo}_DEST_DIR"
+
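+ # ${!VAR} is bash indirect expansion: it expands to the value of the variable
+ # whose name is stored in VAR (e.g. FOO_BASE_REPOSITORY when extra_repo=FOO).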
+ tc-vcs checkout ${!DEST_DIR} ${!BASE_REPO} ${!HEAD_REPO} ${!HEAD_REV} ${!HEAD_REF}
+done
+
+export GECKO_DIR=$WORKSPACE/build/src
+tc-vcs checkout $GECKO_DIR $GECKO_BASE_REPOSITORY $GECKO_HEAD_REPOSITORY $GECKO_HEAD_REV $GECKO_HEAD_REF
diff --git a/testing/docker/android-gradle-build/buildprops.json b/testing/docker/android-gradle-build/buildprops.json
new file mode 100644
index 000000000..f38b7d788
--- /dev/null
+++ b/testing/docker/android-gradle-build/buildprops.json
@@ -0,0 +1,9 @@
+{
+ "properties": {
+ "buildername": ""
+ },
+ "sourcestamp": {
+ "changes": []
+ },
+ "comments": "TaskCluster Job"
+}
diff --git a/testing/docker/android-gradle-build/dot-config/pip/pip.conf b/testing/docker/android-gradle-build/dot-config/pip/pip.conf
new file mode 100644
index 000000000..73c2b2a52
--- /dev/null
+++ b/testing/docker/android-gradle-build/dot-config/pip/pip.conf
@@ -0,0 +1,2 @@
+[global]
+disable-pip-version-check = true
diff --git a/testing/docker/android-gradle-build/oauth.txt b/testing/docker/android-gradle-build/oauth.txt
new file mode 100644
index 000000000..e56c71f57
--- /dev/null
+++ b/testing/docker/android-gradle-build/oauth.txt
@@ -0,0 +1,2 @@
+taskcluster_clientId = None
+taskcluster_accessToken = None
diff --git a/testing/docker/base-build/Dockerfile b/testing/docker/base-build/Dockerfile
new file mode 100644
index 000000000..b5555cc94
--- /dev/null
+++ b/testing/docker/base-build/Dockerfile
@@ -0,0 +1,20 @@
+FROM centos:centos6
+MAINTAINER Jonas Finnemann Jensen <jopsen@gmail.com>
+
+# Run system setup script; this ensures that the whole process
+# boils down to a single docker layer
+ADD system-setup.sh /tmp/system-setup.sh
+RUN ["/tmp/system-setup.sh"]
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+
+# Declare default working folder
+WORKDIR /home/worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/base-build/VERSION b/testing/docker/base-build/VERSION
new file mode 100644
index 000000000..4e379d2bf
--- /dev/null
+++ b/testing/docker/base-build/VERSION
@@ -0,0 +1 @@
+0.0.2
diff --git a/testing/docker/base-build/system-setup.sh b/testing/docker/base-build/system-setup.sh
new file mode 100755
index 000000000..c47c59591
--- /dev/null
+++ b/testing/docker/base-build/system-setup.sh
@@ -0,0 +1,46 @@
+#!/bin/bash -ve
+
+################################### setup.sh ###################################
+
+### Check that we are running as root
+test `whoami` == 'root';
+
+### Add worker user
+# Minimize the number of things which the build script can do. Security-wise
+# it's not a problem to let the build script install things with yum, but it
+# really shouldn't do this, so let's forbid root access.
+useradd -d /home/worker -s /bin/bash -m worker;
+
+# Install extra package mirror
+yum install -y epel-release
+
+### Install Useful Packages
+# First we update and upgrade to latest versions.
+yum update -y
+
+# Let's install some goodies; ca-certificates is needed for https with hg.
+# sudo will be required anyway, but let's make it explicit. It's nice to have
+# sudo around. We'll also install nano; this is pure bloat, I know, but it's
+# useful to have a text editor.
+yum install -y \
+ ca-certificates \
+ sudo \
+ nano \
+ ;
+
+# Then let's install all firefox build dependencies; these are extracted from
+# mozboot. See python/mozboot/bin/bootstrap.py in mozilla-central.
+yum groupinstall -y \
+ "Development Tools" \
+ "Development Libraries" \
+ "GNOME Software Development"
+
+### Clean up from setup
+# Remove cached packages. Cached packages take up a lot of space, and
+# distributing them to workers is wasteful.
+yum clean all
+
+# Remove this setup script; we don't really need it anymore, and deleting it
+# keeps the image as clean as possible.
+rm $0; echo "Deleted $0";
+
diff --git a/testing/docker/base-test/Dockerfile b/testing/docker/base-test/Dockerfile
new file mode 100644
index 000000000..dd0bf77fe
--- /dev/null
+++ b/testing/docker/base-test/Dockerfile
@@ -0,0 +1,138 @@
+FROM quay.io/mozilla/ubuntu:12.04
+MAINTAINER Jonas Finnemann Jensen <jopsen@gmail.com>
+
+
+COPY sources.list /etc/apt/sources.list
+
+RUN useradd -d /home/worker -s /bin/bash -m worker
+# allow the worker user to access video devices
+RUN usermod -a -G video worker
+
+RUN apt-get update && apt-get install -y --force-yes \
+ alsa-base \
+ alsa-utils \
+ autoconf2.13 \
+ bluez-alsa \
+ bluez-alsa:i386 \
+ bluez-cups \
+ bluez-gstreamer \
+ build-essential \
+ ca-certificates \
+ ccache \
+ clang \
+ curl \
+ fonts-kacst \
+ fonts-kacst-one \
+ fonts-liberation \
+ fonts-stix \
+ fonts-unfonts-core \
+ fonts-unfonts-extra \
+ fonts-vlgothic \
+ g++-multilib \
+ gcc-multilib \
+ gir1.2-gnomebluetooth-1.0 \
+ git \
+ gstreamer0.10-alsa \
+ gstreamer0.10-ffmpeg \
+ gstreamer0.10-plugins-bad \
+ gstreamer0.10-plugins-base \
+ gstreamer0.10-plugins-good \
+ gstreamer0.10-plugins-ugly \
+ gstreamer0.10-tools \
+ libasound2-dev \
+ libasound2-plugins:i386 \
+ libcanberra-pulse \
+ libcurl4-openssl-dev \
+ libdbus-1-dev \
+ libdbus-glib-1-dev \
+ libdrm-intel1:i386 \
+ libdrm-nouveau1a:i386 \
+ libdrm-radeon1:i386 \
+ libdrm2:i386 \
+ libexpat1:i386 \
+ libgconf2-dev \
+ libgl1-mesa-dri \
+ libgl1-mesa-dri:i386 \
+ libgl1-mesa-glx \
+ libgl1-mesa-glx:i386 \
+ libglapi-mesa \
+ libglapi-mesa:i386 \
+ libglu1-mesa \
+ libglu1-mesa:i386 \
+ libgnome-bluetooth8 \
+ libgstreamer-plugins-base0.10-dev \
+ libgstreamer0.10-dev \
+ libgtk2.0-dev \
+ libiw-dev \
+ libllvm2.9 \
+ libllvm3.0:i386 \
+ libncurses5:i386 \
+ libnotify-dev \
+ libpulse-dev \
+ libpulse-mainloop-glib0:i386 \
+ libpulsedsp:i386 \
+ libsdl1.2debian:i386 \
+ libsox-fmt-alsa \
+ libx11-xcb1:i386 \
+ libxcb-glx0 \
+ libxcb-glx0:i386 \
+ libxdamage1:i386 \
+ libxfixes3:i386 \
+ libxt-dev \
+ libxxf86vm1 \
+ libxxf86vm1:i386 \
+ llvm \
+ llvm-2.9 \
+ llvm-2.9-dev \
+ llvm-2.9-runtime \
+ llvm-dev \
+ llvm-runtime \
+ mercurial \
+ mesa-common-dev \
+ nano \
+ pulseaudio \
+ pulseaudio-module-X11 \
+ pulseaudio-module-bluetooth \
+ pulseaudio-module-gconf \
+ python-dev \
+ python-pip \
+ rlwrap \
+ screen \
+ software-properties-common \
+ sudo \
+ tar \
+ ttf-arphic-uming \
+ ttf-dejavu \
+ ttf-indic-fonts-core \
+ ttf-kannada-fonts \
+ ttf-oriya-fonts \
+ ttf-paktype \
+ ttf-punjabi-fonts \
+ ttf-sazanami-mincho \
+ unzip \
+ uuid \
+ vim \
+ wget \
+ x11-xserver-utils \
+ x11-utils \
+ xvfb \
+ yasm \
+ zip
+
+ENV NODE_VERSION v4.2.2
+RUN cd /usr/local/ && \
+ curl https://nodejs.org/dist/$NODE_VERSION/node-$NODE_VERSION-linux-x64.tar.gz | tar -xz --strip-components 1 && \
+ node -v
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+
+# Declare default working folder
+WORKDIR /home/worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/base-test/REGISTRY b/testing/docker/base-test/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/base-test/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/base-test/VERSION b/testing/docker/base-test/VERSION
new file mode 100644
index 000000000..b1e80bb24
--- /dev/null
+++ b/testing/docker/base-test/VERSION
@@ -0,0 +1 @@
+0.1.3
diff --git a/testing/docker/base-test/sources.list b/testing/docker/base-test/sources.list
new file mode 100644
index 000000000..38d9829c5
--- /dev/null
+++ b/testing/docker/base-test/sources.list
@@ -0,0 +1,37 @@
+deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu/ precise main restricted universe
+deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu/ precise-security main restricted universe
+deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/releng/ precise main
+deb http://puppetagain.pub.build.mozilla.org/data/repos/apt/puppetlabs precise dependencies main
+
+deb http://archive.ubuntu.com/ubuntu/ precise main restricted
+deb-src http://archive.ubuntu.com/ubuntu/ precise main restricted
+
+## Major bug fix updates produced after the final release of the
+## distribution.
+deb http://archive.ubuntu.com/ubuntu/ precise-updates main restricted
+deb-src http://archive.ubuntu.com/ubuntu/ precise-updates main restricted
+
+## Uncomment the following two lines to add software from the 'universe'
+## repository.
+## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
+## team. Also, please note that software in universe WILL NOT receive any
+## review or updates from the Ubuntu security team.
+deb http://archive.ubuntu.com/ubuntu/ precise universe
+deb-src http://archive.ubuntu.com/ubuntu/ precise universe
+deb http://archive.ubuntu.com/ubuntu/ precise-updates universe
+deb-src http://archive.ubuntu.com/ubuntu/ precise-updates universe
+
+## N.B. software from this repository may not have been tested as
+## extensively as that contained in the main release, although it includes
+## newer versions of some applications which may provide useful features.
+## Also, please note that software in backports WILL NOT receive any review
+## or updates from the Ubuntu security team.
+# deb http://archive.ubuntu.com/ubuntu/ precise-backports main restricted
+# deb-src http://archive.ubuntu.com/ubuntu/ precise-backports main restricted
+
+deb http://archive.ubuntu.com/ubuntu/ precise-security main restricted
+deb-src http://archive.ubuntu.com/ubuntu/ precise-security main restricted
+deb http://archive.ubuntu.com/ubuntu/ precise-security universe
+deb-src http://archive.ubuntu.com/ubuntu/ precise-security universe
+# deb http://archive.ubuntu.com/ubuntu/ precise-security multiverse
+# deb-src http://archive.ubuntu.com/ubuntu/ precise-security multiverse
diff --git a/testing/docker/beet-mover/Dockerfile b/testing/docker/beet-mover/Dockerfile
new file mode 100644
index 000000000..95466f8e6
--- /dev/null
+++ b/testing/docker/beet-mover/Dockerfile
@@ -0,0 +1,25 @@
+FROM ubuntu:vivid
+
+# Ubuntu Vivid has been moved to the old-releases repo
+RUN sed -i -e 's/archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
+RUN apt-get -q update \
+ && apt-get install --yes -q \
+ mercurial \
+ python-dev \
+ python-pip \
+ python-virtualenv \
+ libffi-dev \
+ liblzma-dev \
+ libssl-dev \
+ libyaml-dev \
+ libmysqlclient-dev \
+ clamav \
+ clamav-freshclam \
+ curl \
+ wget \
+ && apt-get clean
+
+COPY requirements.txt /tmp/
+RUN pip install -r /tmp/requirements.txt
+# Freshclam may be flaky, retry if it fails
+RUN for i in 1 2 3 4 5; do freshclam --verbose && break || sleep 15; done
diff --git a/testing/docker/beet-mover/requirements.txt b/testing/docker/beet-mover/requirements.txt
new file mode 100644
index 000000000..b4f746aa9
--- /dev/null
+++ b/testing/docker/beet-mover/requirements.txt
@@ -0,0 +1,2 @@
+sh
+redo
diff --git a/testing/docker/centos6-build-upd/Dockerfile b/testing/docker/centos6-build-upd/Dockerfile
new file mode 100644
index 000000000..a245bcbe5
--- /dev/null
+++ b/testing/docker/centos6-build-upd/Dockerfile
@@ -0,0 +1,10 @@
+FROM taskcluster/centos6-build:0.1.6
+MAINTAINER Dustin J. Mitchell <dustin@mozilla.com>
+
+### update to latest from upstream repositories
+# if this becomes a long list of packages, consider bumping the
+# centos6-build version
+RUN yum update -y
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/centos6-build-upd/REGISTRY b/testing/docker/centos6-build-upd/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/centos6-build-upd/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/centos6-build-upd/VERSION b/testing/docker/centos6-build-upd/VERSION
new file mode 100644
index 000000000..01ae56f9e
--- /dev/null
+++ b/testing/docker/centos6-build-upd/VERSION
@@ -0,0 +1 @@
+0.1.6.20160329195300
diff --git a/testing/docker/centos6-build/Dockerfile b/testing/docker/centos6-build/Dockerfile
new file mode 100644
index 000000000..9e7ce405d
--- /dev/null
+++ b/testing/docker/centos6-build/Dockerfile
@@ -0,0 +1,32 @@
+FROM centos:6
+MAINTAINER Dustin J. Mitchell <dustin@mozilla.com>
+
+### add worker user and setup its workspace
+RUN useradd -d /home/worker -s /bin/bash -m worker
+# Declare default working folder
+WORKDIR /home/worker
+
+# This will create a host mounted filesystem when the cache is stripped
+# on Try. This cancels out some of the performance losses of aufs. See
+# bug 1291940.
+VOLUME /home/worker/workspace
+VOLUME /home/worker/tooltool-cache
+
+# install non-build specific dependencies in a single layer
+ADD system-setup.sh /tmp/system-setup.sh
+RUN bash /tmp/system-setup.sh
+
+# Builds need the share module enabled
+ADD hgrc /home/worker/.hgrc
+RUN chown -R worker:worker /home/worker/.hgrc
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+ENV HOSTNAME taskcluster-worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/centos6-build/REGISTRY b/testing/docker/centos6-build/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/centos6-build/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/centos6-build/VERSION b/testing/docker/centos6-build/VERSION
new file mode 100644
index 000000000..c946ee616
--- /dev/null
+++ b/testing/docker/centos6-build/VERSION
@@ -0,0 +1 @@
+0.1.6
diff --git a/testing/docker/centos6-build/hgrc b/testing/docker/centos6-build/hgrc
new file mode 100644
index 000000000..9a0681fbf
--- /dev/null
+++ b/testing/docker/centos6-build/hgrc
@@ -0,0 +1,2 @@
+[extensions]
+share =
diff --git a/testing/docker/centos6-build/system-setup.sh b/testing/docker/centos6-build/system-setup.sh
new file mode 100644
index 000000000..ddb529eed
--- /dev/null
+++ b/testing/docker/centos6-build/system-setup.sh
@@ -0,0 +1,477 @@
+#!/usr/bin/env bash
+
+set -ve
+
+test `whoami` == 'root'
+
+# lots of goodies in EPEL
+yum install -y epel-release
+
+# this sometimes fails, so we repeat it
+yum makecache || yum makecache
+
+yum shell -y <<'EOF'
+# This covers a bunch of requirements
+groupinstall Base
+
+install findutils
+install gawk
+install ppl
+install cpp
+install grep
+install gzip
+install sed
+install tar
+install util-linux
+install autoconf213
+install perl-Test-Simple
+install perl-Config-General
+
+# fonts required for PGO
+install xorg-x11-font*
+
+# lots of required packages that we build against. We need the i686 and x86_64
+# versions of each, along with -devel packages, and yum does a poor job of
+# figuring out the interdependencies so we list all four.
+
+install alsa-lib-devel.i686
+install alsa-lib-devel.x86_64
+install alsa-lib.i686
+install alsa-lib.x86_64
+install atk-devel.i686
+install atk-devel.x86_64
+install atk.i686
+install atk.x86_64
+install cairo-devel.i686
+install cairo-devel.x86_64
+install cairo.i686
+install cairo.x86_64
+install check-devel.i686
+install check-devel.x86_64
+install check.i686
+install check.x86_64
+install dbus-glib-devel.i686
+install dbus-glib-devel.x86_64
+install dbus-glib.i686
+install dbus-glib.x86_64
+install fontconfig-devel.i686
+install fontconfig-devel.x86_64
+install fontconfig.i686
+install fontconfig.x86_64
+install freetype-devel.i686
+install freetype-devel.x86_64
+install freetype.i686
+install freetype.x86_64
+install GConf2-devel.i686
+install GConf2-devel.x86_64
+install GConf2.i686
+install GConf2.x86_64
+install gdk-pixbuf2-devel.i686
+install gdk-pixbuf2-devel.x86_64
+install glib2-devel.i686
+install glib2-devel.x86_64
+install glib2.i686
+install glib2.x86_64
+install glibc-devel.i686
+install glibc-devel.x86_64
+install glibc.i686
+install glibc.x86_64
+install gnome-vfs2-devel.i686
+install gnome-vfs2-devel.x86_64
+install gnome-vfs2.i686
+install gnome-vfs2.x86_64
+install gstreamer-devel.i686
+install gstreamer-devel.x86_64
+install gstreamer.i686
+install gstreamer-plugins-base-devel.i686
+install gstreamer-plugins-base-devel.x86_64
+install gstreamer-plugins-base.i686
+install gstreamer-plugins-base.x86_64
+install gstreamer.x86_64
+install gtk2-devel.i686
+install gtk2-devel.x86_64
+install gtk2.i686
+install gtk2.x86_64
+install libcurl-devel.i686
+install libcurl-devel.x86_64
+install libcurl.i686
+install libcurl.x86_64
+install libdrm-devel.i686
+install libdrm-devel.x86_64
+install libdrm.i686
+install libdrm.x86_64
+install libICE-devel.i686
+install libICE-devel.x86_64
+install libICE.i686
+install libICE.x86_64
+install libIDL-devel.i686
+install libIDL-devel.x86_64
+install libIDL.i686
+install libIDL.x86_64
+install libidn-devel.i686
+install libidn-devel.x86_64
+install libidn.i686
+install libidn.x86_64
+install libnotify-devel.i686
+install libnotify-devel.x86_64
+install libnotify.i686
+install libnotify.x86_64
+install libpng-devel.i686
+install libpng-devel.x86_64
+install libpng.i686
+install libpng.x86_64
+install libSM-devel.i686
+install libSM-devel.x86_64
+install libSM.i686
+install libSM.x86_64
+install libstdc++-devel.i686
+install libstdc++-devel.x86_64
+install libstdc++.i686
+install libstdc++.x86_64
+install libX11-devel.i686
+install libX11-devel.x86_64
+install libX11.i686
+install libX11.x86_64
+install libXau-devel.i686
+install libXau-devel.x86_64
+install libXau.i686
+install libXau.x86_64
+install libxcb-devel.i686
+install libxcb-devel.x86_64
+install libxcb.i686
+install libxcb.x86_64
+install libXcomposite-devel.i686
+install libXcomposite-devel.x86_64
+install libXcomposite.i686
+install libXcomposite.x86_64
+install libXcursor-devel.i686
+install libXcursor-devel.x86_64
+install libXcursor.i686
+install libXcursor.x86_64
+install libXdamage-devel.i686
+install libXdamage-devel.x86_64
+install libXdamage.i686
+install libXdamage.x86_64
+install libXdmcp-devel.i686
+install libXdmcp-devel.x86_64
+install libXdmcp.i686
+install libXdmcp.x86_64
+install libXext-devel.i686
+install libXext-devel.x86_64
+install libXext.i686
+install libXext.x86_64
+install libXfixes-devel.i686
+install libXfixes-devel.x86_64
+install libXfixes.i686
+install libXfixes.x86_64
+install libXft-devel.i686
+install libXft-devel.x86_64
+install libXft.i686
+install libXft.x86_64
+install libXi-devel.i686
+install libXi-devel.x86_64
+install libXi.i686
+install libXinerama-devel.i686
+install libXinerama-devel.x86_64
+install libXinerama.i686
+install libXinerama.x86_64
+install libXi.x86_64
+install libxml2-devel.i686
+install libxml2-devel.x86_64
+install libxml2.i686
+install libxml2.x86_64
+install libXrandr-devel.i686
+install libXrandr-devel.x86_64
+install libXrandr.i686
+install libXrandr.x86_64
+install libXrender-devel.i686
+install libXrender-devel.x86_64
+install libXrender.i686
+install libXrender.x86_64
+install libXt-devel.i686
+install libXt-devel.x86_64
+install libXt.i686
+install libXt.x86_64
+install libXxf86vm-devel.i686
+install libXxf86vm-devel.x86_64
+install libXxf86vm.i686
+install libXxf86vm.x86_64
+install mesa-libGL-devel.i686
+install mesa-libGL-devel.x86_64
+install mesa-libGL.i686
+install mesa-libGL.x86_64
+install ORBit2-devel.i686
+install ORBit2-devel.x86_64
+install ORBit2.i686
+install ORBit2.x86_64
+install pango-devel.i686
+install pango-devel.x86_64
+install pango.i686
+install pango.x86_64
+install pixman-devel.i686
+install pixman-devel.x86_64
+install pixman.i686
+install pixman.x86_64
+install pulseaudio-libs-devel.i686
+install pulseaudio-libs-devel.x86_64
+install pulseaudio-libs.i686
+install pulseaudio-libs.x86_64
+install wireless-tools-devel.i686
+install wireless-tools-devel.x86_64
+install wireless-tools.i686
+install wireless-tools.x86_64
+install zlib-devel.i686
+install zlib-devel.x86_64
+install zlib.i686
+install zlib.x86_64
+
+# x86_64 only packages
+install hal-devel.x86_64
+install hal.x86_64
+install perl-devel.x86_64
+install perl.x86_64
+install dbus-x11.x86_64
+
+# glibc-static has no -devel
+install glibc-static.i686
+install glibc-static.x86_64
+
+# dbus-devel comes in two architectures, although dbus does not
+install dbus-devel.i686
+install dbus-devel.x86_64
+install dbus.x86_64
+
+# required for the Python build, below
+install bzip2-devel
+install openssl-devel
+install xz-libs
+install sqlite-devel
+
+# required for the git build, below
+install autoconf
+install perl-devel
+install perl-ExtUtils-MakeMaker
+install gettext-devel
+
+# build utilities
+install ccache
+
+# a basic node environment so that we can run TaskCluster tools
+install nodejs
+install npm
+
+# enough X to run `make check` and do a PGO build
+install Xvfb
+install xvinfo
+
+# required for building OS X tools
+install patch
+install libuuid-devel
+install openssl-static
+install cmake
+install subversion
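+
+# "run" executes all of the queued commands above as a single yum transaction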
+run
+EOF
+
+BUILD=/root/build
+mkdir $BUILD
+
+# for the builds below, there's no sense using ccache
+export CCACHE_DISABLE=1
+
+cd $BUILD
+curl https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py > tooltool.py
+
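+# Read a tooltool manifest from stdin, fetch the files it lists, then remove
+# the temporary manifest file.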
+tooltool_fetch() {
+ cat >manifest.tt
+ python $BUILD/tooltool.py fetch
+ rm manifest.tt
+}
+
+# For a few packages, we want to run the very latest, which is hard to find for
+# stable old CentOS 6. Custom yum repositories are cumbersome and can cause
+# unhappy failures when they contain multiple versions of the same package. So
+# we either build from source or install an RPM from tooltool (the former being
+# the preferred solution for transparency). Each of these source files was
+# downloaded directly from the upstream project site, although the RPMs are of
+# unknown origin.
+
+cd $BUILD
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 17051332,
+ "digest": "57c816a6df9731aa5f34678abb59ea560bbdb5abd01df3f3a001dc94a3695d3190b1121caba483f8d8c4a405f4e53fde63a628527aca73f05652efeaec9621c4",
+ "algorithm": "sha512",
+ "filename": "valgrind-3.10.0-1.x86_64.rpm"
+},
+{
+ "size": 830601,
+ "digest": "c04dadf29a3ac676e93cb684b619f753584f8414167135eb766602671d08c85d7bc564511310564bdf2651d72da911b017f0969b9a26d84df724aebf8733f268",
+ "algorithm": "sha512",
+ "filename": "yasm-1.1.0-1.x86_64.rpm"
+}
+]
+EOF
+yum install -y valgrind-*.rpm
+yum install -y yasm-*.rpm
+
+# The source RPM for valgrind; not used here, but included for reference
+: <<'EOF'
+[
+{
+ "size": 10767445,
+ "digest": "d435897b602f7bdf77fabf1c80bbd06ba4f7288ad0ef31d19a863546d4651172421b45f2f090bad3c3355c9fa2a00352066f18d99bf994838579b768b90553d3",
+ "algorithm": "sha512",
+ "filename": "valgrind-3.10.0-1.src.rpm"
+}
+]
+EOF
+
+# Git
+cd $BUILD
+# NOTE: rc builds are in https://www.kernel.org/pub/software/scm/git/testing/
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 3938976,
+ "visibility": "public",
+ "digest": "f31cedb6d7c813d5cc9f40daa54ec6b34b046b8ec1b7a09a37598637f747449147a22736e95e4388d1a29fd01d7974b82342114b91d63b9d5df163ea3659fe72",
+ "algorithm": "sha512",
+ "filename": "git-2.8.0.rc3.tar.xz",
+ "unpack": true
+}
+]
+EOF
+cd git-2.8.0.rc3
+make configure
+./configure --prefix=/usr --without-tcltk
+make all install
+git config --global user.email "nobody@mozilla.com"
+git config --global user.name "mozilla"
+
+# Python
+cd $BUILD
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 12250696,
+ "digest": "67615a6defbcda062f15a09f9dd3b9441afd01a8cc3255e5bc45b925378a0ddc38d468b7701176f6cc153ec52a4f21671b433780d9bde343aa9b9c1b2ae29feb",
+ "algorithm": "sha512",
+ "filename": "Python-2.7.10.tar.xz",
+ "unpack": true
+}
+]
+EOF
+cd Python-2.7.10
+./configure --prefix=/usr
+make
+# `altinstall` means that /usr/bin/python still points to CentOS's Python 2.6 install.
+# If you want Python 2.7, use `python2.7`
+make altinstall
+
+# Enough python utilities to get "peep" working
+cd $BUILD
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 630700,
+ "digest": "1367f3a10c1fef2f8061e430585f1927f6bd7c416e764d65cea1f4255824d549efa77beef8ff784bbd62c307b4b1123502e7b3fd01a243c0cc5b433a841cc8b5",
+ "algorithm": "sha512",
+ "filename": "setuptools-18.1.tar.gz",
+ "unpack": true
+},
+{
+ "size": 1051205,
+ "digest": "e7d2e003ec60fce5a75a6a23711d7f9b155e898faebcf55f3abdd912ef513f4e0cf43daca8f9da7179a7a4efe6e4a625a532d051349818847df1364eb5b326de",
+ "algorithm": "sha512",
+ "filename": "pip-6.1.1.tar.gz",
+ "unpack": true
+},
+{
+ "size": 26912,
+ "digest": "9d730ed7852d4d217aaddda959cd5f871ef1b26dd6c513a3780bbb04a5a93a49d6b78e95c2274451a1311c10cc0a72755b269dc9af62640474e6e73a1abec370",
+ "algorithm": "sha512",
+ "filename": "peep-2.4.1.tar.gz",
+ "unpack": false
+}
+]
+EOF
+
+cd $BUILD
+cd setuptools-18.1
+python2.7 setup.py install
+# NOTE: latest peep is not compatible with pip>=7.0
+# https://github.com/erikrose/peep/pull/94
+
+cd $BUILD
+cd pip-6.1.1
+python2.7 setup.py install
+
+cd $BUILD
+pip2.7 install peep-2.4.1.tar.gz
+
+# Peep (latest)
+cd $BUILD
+pip2.7 install peep
+
+# remaining Python utilities are installed with `peep` from upstream
+# repositories; peep verifies file integrity for us
+cat >requirements.txt <<'EOF'
+# sha256: 90pZQ6kAXB6Je8-H9-ivfgDAb6l3e5rWkfafn6VKh9g
+virtualenv==13.1.2
+
+# sha256: wJnELXTi1SC2HdNyzZlrD6dgXAZheDT9exPHm5qaWzA
+mercurial==3.7.3
+EOF
+peep install -r requirements.txt
+
+# TC-VCS
+npm install -g taskcluster-vcs@2.3.18
+
+# Ninja
+cd $BUILD
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 174501,
+ "digest": "551a9e14b95c2d2ddad6bee0f939a45614cce86719748dc580192dd122f3671e3d95fd6a6fb3facb2d314ba100d61a004af4df77f59df119b1b95c6fe8c38875",
+ "algorithm": "sha512",
+ "filename": "ninja-1.6.0.tar.gz",
+ "unpack": true
+}
+]
+EOF
+cd ninja-1.6.0
+./configure.py --bootstrap
+cp ninja /usr/local/bin/ninja
+# Old versions of CMake can only find ninja in this location!
+ln -s /usr/local/bin/ninja /usr/local/bin/ninja-build
+
+# note that TC will replace workspace with a cache mount; there's no sense
+# creating anything inside there
+mkdir -p /home/worker/workspace
+chown worker:worker /home/worker/workspace
+
+# /builds is *not* replaced with a mount in the docker container. The worker
+# user writes to lots of subdirectories, though, so it's owned by that user
+mkdir -p /builds
+chown worker:worker /builds
+
+# remove packages installed for the builds above
+yum shell -y <<'EOF'
+remove bzip2-devel
+remove openssl-devel
+remove xz-libs
+remove autoconf
+remove perl-ExtUtils-MakeMaker
+remove gettext-devel
+remove sqlite-devel
+remove perl-devel
+EOF
+
+# clean up caches from all that downloading and building
+cd /
+rm -rf $BUILD ~/.ccache ~/.cache ~/.npm
+yum clean all
+rm $0
diff --git a/testing/docker/decision/Dockerfile b/testing/docker/decision/Dockerfile
new file mode 100644
index 000000000..3f58399fd
--- /dev/null
+++ b/testing/docker/decision/Dockerfile
@@ -0,0 +1,28 @@
+FROM ubuntu:16.04
+MAINTAINER Greg Arndt <garndt@mozilla.com>
+
+# Add worker user
+RUN useradd -d /home/worker -s /bin/bash -m worker
+RUN mkdir /home/worker/artifacts && chown worker:worker /home/worker/artifacts
+
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /tmp/tooltool.py
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /tmp/install-mercurial.sh
+
+ADD system-setup.sh /tmp/system-setup.sh
+RUN bash /tmp/system-setup.sh
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /home/worker/bin/run-task
+
+ENV PATH /home/worker/bin:$PATH
+ENV SHELL /bin/bash
+ENV HOME /home/worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/decision/README.md b/testing/docker/decision/README.md
new file mode 100644
index 000000000..4490880be
--- /dev/null
+++ b/testing/docker/decision/README.md
@@ -0,0 +1,5 @@
+# Decision Tasks
+
+The decision image is a "bootstrapping" image for the in-tree logic. It
+deals with cloning gecko and the related utilities for providing an
+environment where we can run gecko.
diff --git a/testing/docker/decision/REGISTRY b/testing/docker/decision/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/decision/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/decision/VERSION b/testing/docker/decision/VERSION
new file mode 100644
index 000000000..11808190d
--- /dev/null
+++ b/testing/docker/decision/VERSION
@@ -0,0 +1 @@
+0.1.7
diff --git a/testing/docker/decision/system-setup.sh b/testing/docker/decision/system-setup.sh
new file mode 100644
index 000000000..6a8eccfdf
--- /dev/null
+++ b/testing/docker/decision/system-setup.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+set -v -e
+
+test `whoami` == 'root'
+
+apt-get update
+apt-get install -y --force-yes --no-install-recommends \
+ ca-certificates \
+ python \
+ sudo
+
+BUILD=/root/build
+mkdir $BUILD
+
+tooltool_fetch() {
+ cat >manifest.tt
+ python2.7 /tmp/tooltool.py fetch
+ rm manifest.tt
+}
+
+cd $BUILD
+. /tmp/install-mercurial.sh
+
+cd /
+rm -rf $BUILD
+apt-get clean
+apt-get autoclean
+rm $0
diff --git a/testing/docker/desktop-build/Dockerfile b/testing/docker/desktop-build/Dockerfile
new file mode 100644
index 000000000..4ccb4c985
--- /dev/null
+++ b/testing/docker/desktop-build/Dockerfile
@@ -0,0 +1,65 @@
+# TODO remove VOLUME below when the base image is updated next.
+FROM taskcluster/centos6-build-upd:0.1.6.20160329195300
+MAINTAINER Dustin J. Mitchell <dustin@mozilla.com>
+
+# TODO remove when base image is updated
+VOLUME /home/worker/workspace
+VOLUME /home/worker/tooltool-cache
+
+# Add build scripts; these are the entry points from the taskcluster worker, and
+# operate on environment variables
+ADD bin /home/worker/bin
+RUN chmod +x /home/worker/bin/*
+
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /builds/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /setup/tooltool.py
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/common.sh
+ADD topsrcdir/testing/docker/recipes/common.sh /setup/common.sh
+
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /setup/install-mercurial.sh
+
+# %include testing/docker/recipes/centos6-build-system-setup.sh
+ADD topsrcdir/testing/docker/recipes/centos6-build-system-setup.sh /setup/system-setup.sh
+
+# TODO remove once base image doesn't install Mercurial
+RUN pip uninstall -y Mercurial
+
+RUN bash /setup/system-setup.sh
+
+# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb
+# %include testing/docker/recipes/xvfb.sh
+ADD topsrcdir/testing/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /home/worker/bin/run-task
+
+# Add configuration
+COPY dot-config /home/worker/.config
+
+# Generate machine uuid file
+RUN dbus-uuidgen --ensure=/var/lib/dbus/machine-id
+
+# Stubbed out credentials; mozharness looks for this file and issues a WARNING
+# if it's not found, which causes the build to fail. Note that this needs to
+# be in the parent of the workspace directory and in the directory where
+# mozharness is run (not its --work-dir). See Bug 1169652.
+ADD oauth.txt /home/worker/
+
+# stubbed out buildprops, which keeps mozharness from choking
+# Note that this needs to be in the parent of the workspace directory and in
+# the directory where mozharness is run (not its --work-dir)
+ADD buildprops.json /home/worker/
+
+# Move installation to base centos6-build image once Bug 1272629 is fixed
+# Install the screen package here to use with xvfb.
+# Install bison to build binutils.
+RUN yum install -y bison screen
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/desktop-build/bin/build.sh b/testing/docker/desktop-build/bin/build.sh
new file mode 100644
index 000000000..b7e1502a9
--- /dev/null
+++ b/testing/docker/desktop-build/bin/build.sh
@@ -0,0 +1,36 @@
+#! /bin/bash -vex
+
+set -x -e -v
+
+# Relative path to in-tree script
+: JOB_SCRIPT ${JOB_SCRIPT:=taskcluster/scripts/builder/build-linux.sh}
+
+script_args="${@}"
+
+# TODO: when bug 1093833 is solved and tasks can run as non-root, reduce this
+# to a simple fail-if-root check
+if [ $(id -u) = 0 ]; then
+    # each of the caches we have mounted is owned by root, so update that ownership
+ # to 'worker'
+ for cache in /home/worker/.tc-vcs /home/worker/workspace /home/worker/tooltool-cache; do
+ if [ -d $cache ]; then
+ # -R probably isn't necessary forever, but it fixes some poisoned
+ # caches for now
+ chown -R worker:worker $cache
+ fi
+ done
+
+ # ..then drop privileges by re-running this script
+ exec su worker -c "/home/worker/bin/build.sh $script_args"
+fi
+
+####
+# The default build works for any fx_desktop_build based mozharness job:
+# via build-linux.sh
+####
+
+. $HOME/bin/checkout-sources.sh
+
+script=$WORKSPACE/build/src/$JOB_SCRIPT
+chmod +x $script
+exec $script $script_args
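How build.sh is driven is implicit in the environment it reads. A hypothetical invocation as the task entry point, using the script's own default for JOB_SCRIPT (all values illustrative):

    export GECKO_HEAD_REPOSITORY=https://hg.mozilla.org/mozilla-central
    export GECKO_HEAD_REV=default
    export JOB_SCRIPT=taskcluster/scripts/builder/build-linux.sh   # the default shown above
    /home/worker/bin/build.sh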
diff --git a/testing/docker/desktop-build/bin/checkout-script.sh b/testing/docker/desktop-build/bin/checkout-script.sh
new file mode 100644
index 000000000..2bacf3f01
--- /dev/null
+++ b/testing/docker/desktop-build/bin/checkout-script.sh
@@ -0,0 +1,17 @@
+#! /bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
+: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
+
+: SCRIPT_DOWNLOAD_PATH ${SCRIPT_DOWNLOAD_PATH:=$PWD}
+: SCRIPT_PATH ${SCRIPT_PATH:?"script path must be set"}
+set -v
+
+# download script from the gecko repository
+url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/${SCRIPT_PATH}
+wget --directory-prefix=${SCRIPT_DOWNLOAD_PATH} $url
+chmod +x `basename ${SCRIPT_PATH}`
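A usage sketch for checkout-script.sh, which downloads a single file from the repository's raw-file endpoint instead of performing a full clone (the SCRIPT_PATH value reuses a path mentioned elsewhere in this diff; treat it as illustrative):

    export GECKO_HEAD_REPOSITORY=https://hg.mozilla.org/mozilla-central
    export GECKO_HEAD_REV=default
    export SCRIPT_PATH=taskcluster/scripts/builder/build-linux.sh
    bash /home/worker/bin/checkout-script.sh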
diff --git a/testing/docker/desktop-build/bin/checkout-sources.sh b/testing/docker/desktop-build/bin/checkout-sources.sh
new file mode 100644
index 000000000..9080472bc
--- /dev/null
+++ b/testing/docker/desktop-build/bin/checkout-sources.sh
@@ -0,0 +1,55 @@
+#! /bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+# mozharness builds use two repositories: gecko (source) and build-tools
+# (miscellaneous). For each, specify *_REPOSITORY. If the
+# revision is not in the standard repo for the codebase, specify *_BASE_REPO as
+# the canonical repo to clone and *_HEAD_REPO as the repo containing the
+# desired revision. For Mercurial clones, only *_HEAD_REV is required; for Git
+# clones, specify the branch name to fetch as *_HEAD_REF and the desired sha1
+# as *_HEAD_REV.
+
+: GECKO_REPOSITORY ${GECKO_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
+: GECKO_BASE_REPOSITORY ${GECKO_BASE_REPOSITORY:=${GECKO_REPOSITORY}}
+: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=${GECKO_REPOSITORY}}
+: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
+: GECKO_HEAD_REF ${GECKO_HEAD_REF:=${GECKO_HEAD_REV}}
+
+: TOOLS_REPOSITORY ${TOOLS_REPOSITORY:=https://hg.mozilla.org/build/tools}
+: TOOLS_BASE_REPOSITORY ${TOOLS_BASE_REPOSITORY:=${TOOLS_REPOSITORY}}
+: TOOLS_HEAD_REPOSITORY ${TOOLS_HEAD_REPOSITORY:=${TOOLS_REPOSITORY}}
+: TOOLS_HEAD_REV ${TOOLS_HEAD_REV:=default}
+: TOOLS_HEAD_REF ${TOOLS_HEAD_REF:=${TOOLS_HEAD_REV}}
+: TOOLS_DISABLE ${TOOLS_DISABLE:=false}
+
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+
+set -v
+
+# check out tools where mozharness expects it to be ($PWD/build/tools and $WORKSPACE/build/tools)
+if [ ! "$TOOLS_DISABLE" = true ]
+then
+ tc-vcs checkout $WORKSPACE/build/tools $TOOLS_BASE_REPOSITORY $TOOLS_HEAD_REPOSITORY $TOOLS_HEAD_REV $TOOLS_HEAD_REF
+
+ if [ ! -d build ]; then
+ mkdir -p build
+ ln -s $WORKSPACE/build/tools build/tools
+ fi
+fi
+
+# TODO - include tools repository in EXTRA_CHECKOUT_REPOSITORIES list
+for extra_repo in $EXTRA_CHECKOUT_REPOSITORIES; do
+ BASE_REPO="${extra_repo}_BASE_REPOSITORY"
+ HEAD_REPO="${extra_repo}_HEAD_REPOSITORY"
+ HEAD_REV="${extra_repo}_HEAD_REV"
+ HEAD_REF="${extra_repo}_HEAD_REF"
+ DEST_DIR="${extra_repo}_DEST_DIR"
+
+ tc-vcs checkout ${!DEST_DIR} ${!BASE_REPO} ${!HEAD_REPO} ${!HEAD_REV} ${!HEAD_REF}
+done
+
+export GECKO_DIR=$WORKSPACE/build/src
+tc-vcs checkout $GECKO_DIR $GECKO_BASE_REPOSITORY $GECKO_HEAD_REPOSITORY $GECKO_HEAD_REV $GECKO_HEAD_REF
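The ${!BASE_REPO}-style indirection above means each name in EXTRA_CHECKOUT_REPOSITORIES selects its own family of environment variables. A minimal sketch for one hypothetical extra repository named TOOLS2 (all names and URLs illustrative):

    export EXTRA_CHECKOUT_REPOSITORIES="TOOLS2"
    export TOOLS2_BASE_REPOSITORY=https://hg.mozilla.org/example/tools2
    export TOOLS2_HEAD_REPOSITORY=$TOOLS2_BASE_REPOSITORY
    export TOOLS2_HEAD_REV=default
    export TOOLS2_HEAD_REF=default
    export TOOLS2_DEST_DIR=$WORKSPACE/build/tools2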
diff --git a/testing/docker/desktop-build/buildprops.json b/testing/docker/desktop-build/buildprops.json
new file mode 100644
index 000000000..f38b7d788
--- /dev/null
+++ b/testing/docker/desktop-build/buildprops.json
@@ -0,0 +1,9 @@
+{
+ "properties": {
+ "buildername": ""
+ },
+ "sourcestamp": {
+ "changes": []
+ },
+ "comments": "TaskCluster Job"
+}
diff --git a/testing/docker/desktop-build/dot-config/pip/pip.conf b/testing/docker/desktop-build/dot-config/pip/pip.conf
new file mode 100644
index 000000000..73c2b2a52
--- /dev/null
+++ b/testing/docker/desktop-build/dot-config/pip/pip.conf
@@ -0,0 +1,2 @@
+[global]
+disable-pip-version-check = true
diff --git a/testing/docker/desktop-build/oauth.txt b/testing/docker/desktop-build/oauth.txt
new file mode 100644
index 000000000..e56c71f57
--- /dev/null
+++ b/testing/docker/desktop-build/oauth.txt
@@ -0,0 +1,2 @@
+taskcluster_clientId = None
+taskcluster_accessToken = None
diff --git a/testing/docker/desktop-test/Dockerfile b/testing/docker/desktop-test/Dockerfile
new file mode 100644
index 000000000..995ff34df
--- /dev/null
+++ b/testing/docker/desktop-test/Dockerfile
@@ -0,0 +1,108 @@
+FROM ubuntu:12.04
+MAINTAINER Jonas Finnemann Jensen <jopsen@gmail.com>
+
+RUN useradd -d /home/worker -s /bin/bash -m worker
+WORKDIR /home/worker
+
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /setup/tooltool.py
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /tmp/install-mercurial.sh
+
+# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb
+# %include testing/docker/recipes/xvfb.sh
+ADD topsrcdir/testing/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh
+
+# Add the tooltool manifest containing the minidump_stackwalk binary.
+# %include testing/config/tooltool-manifests/linux64/releng.manifest
+ADD topsrcdir/testing/config/tooltool-manifests/linux64/releng.manifest /tmp/minidump_stackwalk.manifest
+
+# %include testing/docker/recipes/ubuntu1204-test-system-setup.sh
+ADD topsrcdir/testing/docker/recipes/ubuntu1204-test-system-setup.sh /setup/system-setup.sh
+RUN bash /setup/system-setup.sh
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /home/worker/bin/run-task
+
+# %include taskcluster/scripts/tester/test-ubuntu.sh
+ADD topsrcdir/taskcluster/scripts/tester/test-ubuntu.sh /home/worker/bin/test-linux.sh
+
+# This will create a host-mounted filesystem when the cache is stripped
+# on Try. This cancels out some of the performance losses of aufs. See
+# bug 1291940.
+VOLUME /home/worker/checkouts
+VOLUME /home/worker/workspace
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual.
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+ENV HOSTNAME taskcluster-worker
+ENV LANG en_US.UTF-8
+ENV LC_ALL en_US.UTF-8
+
+# Add utilities and configuration
+COPY dot-files/config /home/worker/.config
+COPY dot-files/pulse /home/worker/.pulse
+RUN chmod +x bin/*
+# TODO: remove this when buildbot is gone
+COPY buildprops.json /home/worker/buildprops.json
+COPY tc-vcs-config.yml /etc/taskcluster-vcs.yml
+
+# TODO: remove
+ADD https://raw.githubusercontent.com/taskcluster/buildbot-step/master/buildbot_step /home/worker/bin/buildbot_step
+RUN chmod u+x /home/worker/bin/buildbot_step
+
+# allow the worker user to access video devices
+RUN usermod -a -G video worker
+
+RUN mkdir Documents; mkdir Pictures; mkdir Music; mkdir Videos; mkdir artifacts
+
+# install tc-vcs and tc-npm-cache
+RUN npm install -g taskcluster-vcs@2.3.12 \
+ && npm install -g taskcluster-npm-cache@1.1.14 \
+ && rm -rf ~/.npm
+ENV PATH $PATH:/home/worker/bin
+
+# TODO Re-enable worker when bug 1093833 lands
+#USER worker
+
+# clean up
+RUN rm -Rf .cache && mkdir -p .cache
+
+# Disable Ubuntu update prompt
+# http://askubuntu.com/questions/515161/ubuntu-12-04-disable-release-notification-of-14-04-in-update-manager
+ADD release-upgrades /etc/update-manager/release-upgrades
+
+# Disable tools with on-login popups that interfere with tests; see bug 1240084 and bug 984944.
+ADD jockey-gtk.desktop deja-dup-monitor.desktop /etc/xdg/autostart/
+
+# In test.sh we accept START_VNC to start a vnc daemon.
+# Exposing this port allows it to work.
+EXPOSE 5900
+
+# This helps avoid forgetting to set DISPLAY=:0 when running
+# tests outside of test.sh.
+ENV DISPLAY :0
+
+# Disable apport (Ubuntu app crash reporter) to avoid stealing focus from test runs
+ADD apport /etc/default/apport
+
+# Disable font antialiasing for now to match releng's setup
+ADD fonts.conf /home/worker/.fonts.conf
+
+# Set up first-run experience for interactive mode
+ADD motd /etc/taskcluster-motd
+ADD taskcluster-interactive-shell /bin/taskcluster-interactive-shell
+RUN chmod +x /bin/taskcluster-interactive-shell
+
+RUN chown -R worker:worker /home/worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/desktop-test/apport b/testing/docker/desktop-test/apport
new file mode 100644
index 000000000..42e5f8d3a
--- /dev/null
+++ b/testing/docker/desktop-test/apport
@@ -0,0 +1 @@
+enabled=0
diff --git a/testing/docker/desktop-test/buildprops.json b/testing/docker/desktop-test/buildprops.json
new file mode 100644
index 000000000..f0967b026
--- /dev/null
+++ b/testing/docker/desktop-test/buildprops.json
@@ -0,0 +1,8 @@
+{
+ "properties": {
+ "buildername": ""
+ },
+ "sourcestamp": {
+ "changes": []
+ }
+}
diff --git a/testing/docker/desktop-test/deja-dup-monitor.desktop b/testing/docker/desktop-test/deja-dup-monitor.desktop
new file mode 100644
index 000000000..c3b8a4c67
--- /dev/null
+++ b/testing/docker/desktop-test/deja-dup-monitor.desktop
@@ -0,0 +1,19 @@
+[Desktop Entry]
+Version=1.0
+X-Ubuntu-Gettext-Domain=deja-dup
+
+Name=Backup Monitor
+Comment=Schedules backups at regular intervals
+
+Icon=deja-dup
+TryExec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor
+Exec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor
+
+# Bug 984944/1240084 - It prevents taking screenshots
+X-GNOME-Autostart-Delay=false
+
+StartupNotify=false
+NoDisplay=true
+
+Type=Application
+Categories=System;Utility;Archiving;
diff --git a/testing/docker/desktop-test/dot-files/config/pip/pip.conf b/testing/docker/desktop-test/dot-files/config/pip/pip.conf
new file mode 100644
index 000000000..73c2b2a52
--- /dev/null
+++ b/testing/docker/desktop-test/dot-files/config/pip/pip.conf
@@ -0,0 +1,2 @@
+[global]
+disable-pip-version-check = true
diff --git a/testing/docker/desktop-test/dot-files/config/user-dirs.dirs b/testing/docker/desktop-test/dot-files/config/user-dirs.dirs
new file mode 100644
index 000000000..2db2718d2
--- /dev/null
+++ b/testing/docker/desktop-test/dot-files/config/user-dirs.dirs
@@ -0,0 +1,15 @@
+# This file is written by xdg-user-dirs-update
+# If you want to change or add directories, just edit the line you're
+# interested in. All local changes will be retained on the next run
+# Format is XDG_xxx_DIR="$HOME/yyy", where yyy is a shell-escaped
+# homedir-relative path, or XDG_xxx_DIR="/yyy", where /yyy is an
+# absolute path. No other format is supported.
+
+XDG_DESKTOP_DIR="$HOME/Desktop"
+XDG_DOWNLOAD_DIR="$HOME/Downloads"
+XDG_TEMPLATES_DIR="$HOME/Templates"
+XDG_PUBLICSHARE_DIR="$HOME/Public"
+XDG_DOCUMENTS_DIR="$HOME/Documents"
+XDG_MUSIC_DIR="$HOME/Music"
+XDG_PICTURES_DIR="$HOME/Pictures"
+XDG_VIDEOS_DIR="$HOME/Videos"
diff --git a/testing/docker/desktop-test/dot-files/config/user-dirs.locale b/testing/docker/desktop-test/dot-files/config/user-dirs.locale
new file mode 100644
index 000000000..7741b83a3
--- /dev/null
+++ b/testing/docker/desktop-test/dot-files/config/user-dirs.locale
@@ -0,0 +1 @@
+en_US
diff --git a/testing/docker/desktop-test/dot-files/pulse/default.pa b/testing/docker/desktop-test/dot-files/pulse/default.pa
new file mode 100644
index 000000000..39bb44aa7
--- /dev/null
+++ b/testing/docker/desktop-test/dot-files/pulse/default.pa
@@ -0,0 +1,164 @@
+#!/usr/bin/pulseaudio -nF
+#
+# This file is part of PulseAudio.
+#
+# PulseAudio is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# PulseAudio is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with PulseAudio; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+# This startup script is used only if PulseAudio is started per-user
+# (i.e. not in system mode)
+
+.nofail
+
+### Load something into the sample cache
+#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav
+#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav
+#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav
+#load-sample-lazy pulse-access /usr/share/sounds/generic.wav
+
+.fail
+
+### Automatically restore the volume of streams and devices
+load-module module-device-restore
+load-module module-stream-restore
+load-module module-card-restore
+
+### Automatically augment property information from .desktop files
+### stored in /usr/share/application
+load-module module-augment-properties
+
+### Load audio drivers statically
+### (it's probably better to not load these drivers manually, but instead
+### use module-udev-detect -- see below -- for doing this automatically)
+#load-module module-alsa-sink
+#load-module module-alsa-source device=hw:1,0
+#load-module module-oss device="/dev/dsp" sink_name=output source_name=input
+#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input
+#load-module module-null-sink
+#load-module module-pipe-sink
+
+### Automatically load driver modules depending on the hardware available
+.ifexists module-udev-detect.so
+load-module module-udev-detect
+.else
+### Use the static hardware detection module (for systems that lack udev/hal support)
+load-module module-detect
+.endif
+
+### Automatically connect sink and source if JACK server is present
+.ifexists module-jackdbus-detect.so
+.nofail
+load-module module-jackdbus-detect
+.fail
+.endif
+
+### Automatically load driver modules for Bluetooth hardware
+# This module causes a pulseaudio startup failure on "gecko-tester"
+#.ifexists module-bluetooth-discover.so
+#load-module module-bluetooth-discover
+#.endif
+
+### Load several protocols
+.ifexists module-esound-protocol-unix.so
+load-module module-esound-protocol-unix
+.endif
+load-module module-native-protocol-unix
+
+### Network access (may be configured with paprefs, so leave this commented
+### here if you plan to use paprefs)
+#load-module module-esound-protocol-tcp
+#load-module module-native-protocol-tcp
+#load-module module-zeroconf-publish
+
+### Load the RTP receiver module (also configured via paprefs, see above)
+#load-module module-rtp-recv
+
+### Load the RTP sender module (also configured via paprefs, see above)
+#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'"
+#load-module module-rtp-send source=rtp.monitor
+
+### Load additional modules from GConf settings. This can be configured with the paprefs tool.
+### Please keep in mind that the modules configured by paprefs might conflict with manually
+### loaded modules.
+.ifexists module-gconf.so
+.nofail
+load-module module-gconf
+.fail
+.endif
+
+### Automatically restore the default sink/source when changed by the user
+### during runtime
+### NOTE: This should be loaded as early as possible so that subsequent modules
+### that look up the default sink/source get the right value
+load-module module-default-device-restore
+
+### Automatically move streams to the default sink if the sink they are
+### connected to dies, similar for sources
+load-module module-rescue-streams
+
+### Make sure we always have a sink around, even if it is a null sink.
+load-module module-always-sink
+
+### Honour intended role device property
+load-module module-intended-roles
+
+### Automatically suspend sinks/sources that become idle for too long
+load-module module-suspend-on-idle
+
+### If autoexit on idle is enabled we want to make sure we only quit
+### when no local session needs us anymore.
+# This module causes a pulseaudio startup failure on "gecko-tester"
+#.ifexists module-console-kit.so
+#load-module module-console-kit
+#.endif
+
+### Enable positioned event sounds
+load-module module-position-event-sounds
+
+### Cork music streams when a phone stream is active
+#load-module module-cork-music-on-phone
+
+### Modules to allow autoloading of filters (such as echo cancellation)
+### on demand. module-filter-heuristics tries to determine what filters
+### make sense, and module-filter-apply does the heavy-lifting of
+### loading modules and rerouting streams.
+load-module module-filter-heuristics
+load-module module-filter-apply
+
+### Load DBus protocol
+#.ifexists module-dbus-protocol.so
+#load-module module-dbus-protocol
+#.endif
+
+# X11 modules should not be started from default.pa so that one daemon
+# can be shared by multiple sessions.
+
+### Load X11 bell module
+#load-module module-x11-bell sample=bell-windowing-system
+
+### Register ourselves in the X11 session manager
+#load-module module-x11-xsmp
+
+### Publish connection data in the X11 root window
+#.ifexists module-x11-publish.so
+#.nofail
+#load-module module-x11-publish
+#.fail
+#.endif
+
+load-module module-switch-on-port-available
+
+### Make some devices default
+#set-default-sink output
+#set-default-source input
diff --git a/testing/docker/desktop-test/fonts.conf b/testing/docker/desktop-test/fonts.conf
new file mode 100644
index 000000000..9784fcc98
--- /dev/null
+++ b/testing/docker/desktop-test/fonts.conf
@@ -0,0 +1,5 @@
+<match target="font">
+ <edit name="antialias" mode="assign">
+ <bool>false</bool>
+ </edit>
+</match>
diff --git a/testing/docker/desktop-test/jockey-gtk.desktop b/testing/docker/desktop-test/jockey-gtk.desktop
new file mode 100644
index 000000000..e433ba898
--- /dev/null
+++ b/testing/docker/desktop-test/jockey-gtk.desktop
@@ -0,0 +1,15 @@
+[Desktop Entry]
+Name=Check for new hardware drivers
+Comment=Notify about new hardware drivers available for the system
+Icon=jockey
+Exec=sh -c "test -e /var/cache/jockey/check || exec jockey-gtk --check"
+Terminal=false
+Type=Application
+Categories=System;Settings;GTK;HardwareSettings;
+NotShowIn=KDE;
+X-Ubuntu-Gettext-Domain=jockey
+
+# Bug 984944/1240084 - It prevents taking screenshots
+X-GNOME-Autostart-Delay=false
+
+NoDisplay=true
diff --git a/testing/docker/desktop-test/motd b/testing/docker/desktop-test/motd
new file mode 100644
index 000000000..f958393cd
--- /dev/null
+++ b/testing/docker/desktop-test/motd
@@ -0,0 +1,6 @@
+Welcome to your taskcluster interactive shell! The regularly scheduled task
+has been paused to give you a chance to set up your debugging environment.
+
+For your convenience, the exact mozharness command needed for this task can
+be invoked using the 'run-mozharness' command.
+
diff --git a/testing/docker/desktop-test/release-upgrades b/testing/docker/desktop-test/release-upgrades
new file mode 100644
index 000000000..d714f1d82
--- /dev/null
+++ b/testing/docker/desktop-test/release-upgrades
@@ -0,0 +1,17 @@
+# Default behavior for the release upgrader.
+
+[DEFAULT]
+# Default prompting behavior, valid options:
+#
+# never - Never check for a new release.
+# normal - Check to see if a new release is available. If more than one new
+# release is found, the release upgrader will attempt to upgrade to
+# the release that immediately succeeds the currently-running
+# release.
+# lts - Check to see if a new LTS release is available. The upgrader
+# will attempt to upgrade to the first LTS release available after
+# the currently-running one. Note that this option should not be
+# used if the currently-running release is not itself an LTS
+# release, since in that case the upgrader won't be able to
+# determine if a newer release is available.
+Prompt=never
diff --git a/testing/docker/desktop-test/taskcluster-interactive-shell b/testing/docker/desktop-test/taskcluster-interactive-shell
new file mode 100644
index 000000000..c782c0ea9
--- /dev/null
+++ b/testing/docker/desktop-test/taskcluster-interactive-shell
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+download() {
+ name=`basename $1`
+ url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/$1
+ if ! curl --fail --silent -o ./$name --retry 10 $url; then
+ fail "failed downloading $1 from ${GECKO_HEAD_REPOSITORY}"
+ fi
+}
+
+cd $HOME/bin;
+download taskcluster/scripts/tester/run-wizard;
+chmod +x run-wizard;
+./run-wizard;
+
+SPAWN="$SHELL";
+if [ "$SHELL" = "bash" ]; then
+ SPAWN="bash -li";
+fi;
+
+cd $HOME;
+exec $SPAWN;
diff --git a/testing/docker/desktop-test/tc-vcs-config.yml b/testing/docker/desktop-test/tc-vcs-config.yml
new file mode 100644
index 000000000..25e13ee40
--- /dev/null
+++ b/testing/docker/desktop-test/tc-vcs-config.yml
@@ -0,0 +1,40 @@
+# Default configuration used by the tc-vcs tools; these values can be
+# overridden by passing the config you wish to use on the command line.
+git: git
+hg: hg
+
+repoCache:
+ # Repo url to clone when running repo init..
+ repoUrl: https://gerrit.googlesource.com/git-repo.git
+ # Version of repo to utilize...
+ repoRevision: master
+ # The root where all downloaded cache files are stored on the local machine...
+ cacheDir: '{{env.HOME}}/.tc-vcs-repo/'
+  # Name/prefix used as part of the base url.
+ cacheName: sources/{{name}}.tar.gz
+ # Command used to upload the tarball
+ uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'"
+  # Large http get requests are often slower using node's built-in http layer, so
+ # we utilize a subprocess which is responsible for fetching...
+ get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}}
+ # Used to create clone tarball
+ compress: tar -czf {{dest}} {{source}}
+  # All cache urls use tar + gz; this is the command used to extract those files
+ # downloaded by the "get" command.
+ extract: tar -x -z -C {{dest}} -f {{source}}
+
+cloneCache:
+ # The root where all downloaded cache files are stored on the local machine...
+ cacheDir: '{{env.HOME}}/.tc-vcs/'
+ # Command used to upload the tarball
+ uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'"
+  # Large http get requests are often slower using node's built-in http layer, so
+ # we utilize a subprocess which is responsible for fetching...
+ get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}}
+ # Used to create clone tarball
+ compress: tar -czf {{dest}} {{source}}
+  # All cache urls use tar + gz; this is the command used to extract those files
+ # downloaded by the "get" command.
+ extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}}
+  # Name/prefix used as part of the base url.
+ cacheName: clones/{{name}}.tar.gz
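To illustrate the {{...}} templating, with {{dest}} bound to /tmp/mozilla-central.tar.gz and {{url}} bound to a hypothetical cache URL, the cloneCache "get" command above would expand to roughly:

    # both the path and the URL below are illustrative
    curl --connect-timeout 30 --speed-limit 500000 -L -o /tmp/mozilla-central.tar.gz https://example.com/clones/mozilla-central.tar.gz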
diff --git a/testing/docker/desktop-test/tester.env b/testing/docker/desktop-test/tester.env
new file mode 100644
index 000000000..1bcac6132
--- /dev/null
+++ b/testing/docker/desktop-test/tester.env
@@ -0,0 +1,4 @@
+GAIA_REV=tip
+GAIA_REF=tip
+GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
+GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
diff --git a/testing/docker/desktop1604-test/Dockerfile b/testing/docker/desktop1604-test/Dockerfile
new file mode 100644
index 000000000..929b167c2
--- /dev/null
+++ b/testing/docker/desktop1604-test/Dockerfile
@@ -0,0 +1,116 @@
+FROM ubuntu:16.04
+MAINTAINER Joel Maher <joel.maher@gmail.com>
+
+RUN useradd -d /home/worker -s /bin/bash -m worker
+WORKDIR /home/worker
+
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /setup/tooltool.py
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/common.sh
+ADD topsrcdir/testing/docker/recipes/common.sh /setup/common.sh
+
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /setup/install-mercurial.sh
+
+# Add the tooltool manifest containing the minidump_stackwalk binary.
+# %include testing/config/tooltool-manifests/linux64/releng.manifest
+ADD topsrcdir/testing/config/tooltool-manifests/linux64/releng.manifest /tmp/minidump_stackwalk.manifest
+
+# %include testing/docker/recipes/ubuntu1604-test-system-setup.sh
+ADD topsrcdir/testing/docker/recipes/ubuntu1604-test-system-setup.sh /setup/system-setup.sh
+RUN bash /setup/system-setup.sh
+
+# Add wrapper scripts for xvfb allowing tasks to easily retry starting up xvfb
+# %include testing/docker/recipes/xvfb.sh
+ADD topsrcdir/testing/docker/recipes/xvfb.sh /home/worker/scripts/xvfb.sh
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /home/worker/bin/run-task
+
+# %include taskcluster/scripts/tester/test-ubuntu.sh
+ADD topsrcdir/taskcluster/scripts/tester/test-ubuntu.sh /home/worker/bin/test-linux.sh
+
+# This will create a host-mounted filesystem when the cache is stripped
+# on Try. This cancels out some of the performance losses of aufs. See
+# bug 1291940.
+VOLUME /home/worker/checkouts
+VOLUME /home/worker/workspace
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual.
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+ENV HOSTNAME taskcluster-worker
+ENV LANG en_US.UTF-8
+ENV LC_ALL en_US.UTF-8
+
+# Add utilities and configuration
+COPY dot-files/config /home/worker/.config
+COPY dot-files/pulse /home/worker/.pulse
+COPY bin /home/worker/bin
+RUN chmod +x bin/*
+# TODO: remove this when buildbot is gone
+COPY buildprops.json /home/worker/buildprops.json
+COPY tc-vcs-config.yml /etc/taskcluster-vcs.yml
+
+# TODO: remove
+ADD https://raw.githubusercontent.com/taskcluster/buildbot-step/master/buildbot_step /home/worker/bin/buildbot_step
+RUN chmod u+x /home/worker/bin/buildbot_step
+
+# allow the worker user to access video devices
+RUN usermod -a -G video worker
+
+RUN mkdir Documents; mkdir Pictures; mkdir Music; mkdir Videos; mkdir artifacts
+
+# install a new enough npm, plus tc-vcs and tc-npm-cache
+RUN npm install -g npm@^2.0.0 \
+ && npm install -g taskcluster-vcs@2.3.12 \
+ && npm install -g taskcluster-npm-cache@1.1.14 \
+ && rm -rf ~/.npm
+ENV PATH $PATH:/home/worker/bin
+
+# TODO Re-enable worker when bug 1093833 lands
+#USER worker
+
+# clean up
+RUN rm -Rf .cache && mkdir -p .cache
+
+# Disable Ubuntu update prompt
+# http://askubuntu.com/questions/515161/ubuntu-12-04-disable-release-notification-of-14-04-in-update-manager
+ADD release-upgrades /etc/update-manager/release-upgrades
+
+# Disable tools with on-login popups that interfere with tests; see bug 1240084 and bug 984944.
+ADD autostart/jockey-gtk.desktop autostart/deja-dup-monitor.desktop /etc/xdg/autostart/
+
+# Bug 1345105 - Do not run periodic update checks and downloads
+ADD autostart/gnome-software-service.desktop /etc/xdg/autostart/
+
+# In test.sh we accept START_VNC to start a vnc daemon.
+# Exposing this port allows it to work.
+EXPOSE 5900
+
+# This helps avoid forgetting to set DISPLAY=:0 when running
+# tests outside of test.sh.
+ENV DISPLAY :0
+
+# Disable apport (Ubuntu app crash reporter) to avoid stealing focus from test runs
+ADD apport /etc/default/apport
+
+# Disable font antialiasing for now to match releng's setup
+ADD fonts.conf /home/worker/.fonts.conf
+
+# Set up first-run experience for interactive mode
+ADD motd /etc/taskcluster-motd
+ADD taskcluster-interactive-shell /bin/taskcluster-interactive-shell
+RUN chmod +x /bin/taskcluster-interactive-shell
+
+RUN chown -R worker:worker /home/worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/desktop1604-test/apport b/testing/docker/desktop1604-test/apport
new file mode 100644
index 000000000..42e5f8d3a
--- /dev/null
+++ b/testing/docker/desktop1604-test/apport
@@ -0,0 +1 @@
+enabled=0
diff --git a/testing/docker/desktop1604-test/autostart/deja-dup-monitor.desktop b/testing/docker/desktop1604-test/autostart/deja-dup-monitor.desktop
new file mode 100644
index 000000000..c3b8a4c67
--- /dev/null
+++ b/testing/docker/desktop1604-test/autostart/deja-dup-monitor.desktop
@@ -0,0 +1,19 @@
+[Desktop Entry]
+Version=1.0
+X-Ubuntu-Gettext-Domain=deja-dup
+
+Name=Backup Monitor
+Comment=Schedules backups at regular intervals
+
+Icon=deja-dup
+TryExec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor
+Exec=/usr/lib/deja-dup/deja-dup/deja-dup-monitor
+
+# Bug 984944/1240084 - It prevents taking screenshots
+X-GNOME-Autostart-Delay=false
+
+StartupNotify=false
+NoDisplay=true
+
+Type=Application
+Categories=System;Utility;Archiving;
diff --git a/testing/docker/desktop1604-test/autostart/gnome-software-service.desktop b/testing/docker/desktop1604-test/autostart/gnome-software-service.desktop
new file mode 100644
index 000000000..b563cc306
--- /dev/null
+++ b/testing/docker/desktop1604-test/autostart/gnome-software-service.desktop
@@ -0,0 +1,9 @@
+[Desktop Entry]
+Type=Application
+Name=GNOME Software
+Exec=/usr/bin/gnome-software --gapplication-service
+OnlyShowIn=GNOME;Unity;
+X-Ubuntu-Gettext-Domain=gnome-software
+
+# Bug 1345105 - Do not run periodic update checks and downloads
+X-GNOME-Autostart-enabled=false
diff --git a/testing/docker/desktop1604-test/autostart/jockey-gtk.desktop b/testing/docker/desktop1604-test/autostart/jockey-gtk.desktop
new file mode 100644
index 000000000..e433ba898
--- /dev/null
+++ b/testing/docker/desktop1604-test/autostart/jockey-gtk.desktop
@@ -0,0 +1,15 @@
+[Desktop Entry]
+Name=Check for new hardware drivers
+Comment=Notify about new hardware drivers available for the system
+Icon=jockey
+Exec=sh -c "test -e /var/cache/jockey/check || exec jockey-gtk --check"
+Terminal=false
+Type=Application
+Categories=System;Settings;GTK;HardwareSettings;
+NotShowIn=KDE;
+X-Ubuntu-Gettext-Domain=jockey
+
+# Bug 984944/1240084 - It prevents taking screenshots
+X-GNOME-Autostart-Delay=false
+
+NoDisplay=true
diff --git a/testing/docker/desktop1604-test/bin/run-wizard b/testing/docker/desktop1604-test/bin/run-wizard
new file mode 100755
index 000000000..88c84045c
--- /dev/null
+++ b/testing/docker/desktop1604-test/bin/run-wizard
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import print_function, unicode_literals
+
+import os
+import subprocess
+import sys
+from textwrap import wrap
+
+
+def call(cmd, **kwargs):
+ print(" ".join(cmd))
+ return subprocess.call(cmd, **kwargs)
+
+
+def resume():
+ call(['run-mozharness'])
+
+
+def setup():
+ call(['run-mozharness', '--no-run-tests'])
+ print("Mozharness has finished downloading the build and "
+ "tests to {}.".format(os.path.join(os.getcwd(), 'build')))
+
+
+def clone():
+ repo = os.environ['GECKO_HEAD_REPOSITORY']
+ rev = os.environ['GECKO_HEAD_REV']
+ clone_path = os.path.expanduser(os.path.join('~', 'gecko'))
+
+    # try is too large to clone; instead clone central and pull
+ # in changes from try
+ if "hg.mozilla.org/try" in repo:
+ central = 'http://hg.mozilla.org/mozilla-central'
+ call(['hg', 'clone', '-U', central, clone_path])
+ call(['hg', 'pull', '-u', '-r', rev, repo], cwd=clone_path)
+ else:
+ call(['hg', 'clone', '-u', rev, repo, clone_path])
+ print("Finished cloning to {} at revision {}.".format(
+ clone_path, rev))
+
+
+def exit():
+ pass
+
+
+OPTIONS = [
+ ('Resume task', resume,
+ "Resume the original task without modification. This can be useful for "
+ "passively monitoring it from another shell."),
+ ('Setup task', setup,
+ "Setup the task (download the application and tests) but don't run the "
+ "tests just yet. The tests can be run with a custom configuration later "
+ "(experimental)."),
+ ('Clone gecko', clone,
+ "Perform a clone of gecko using the task's repo and update it to the "
+ "task's revision."),
+ ('Exit', exit, "Exit this wizard and return to the shell.")
+]
+
+
+def _fmt_options():
+ max_line_len = 60
+ max_name_len = max(len(o[0]) for o in OPTIONS)
+
+ # TODO Pad will be off if there are more than 9 options.
+ pad = ' ' * (max_name_len+6)
+
+ msg = []
+ for i, (name, _, desc) in enumerate(OPTIONS):
+ desc = wrap(desc, width=max_line_len)
+ desc = [desc[0]] + [pad + l for l in desc[1:]]
+
+ optstr = '{}) {} - {}\n'.format(
+ i+1, name.ljust(max_name_len), '\n'.join(desc))
+ msg.append(optstr)
+ msg.append("Select one of the above options: ")
+ return '\n'.join(msg)
+
+
+def wizard():
+ print("This wizard can help you get started with some common debugging "
+ "workflows.\nWhat would you like to do?\n")
+ print(_fmt_options(), end="")
+ choice = None
+ while True:
+ choice = raw_input().decode('utf8')
+ try:
+ choice = int(choice)-1
+ if 0 <= choice < len(OPTIONS):
+ break
+ except ValueError:
+ pass
+
+ print("Must provide an integer from 1-{}:".format(len(OPTIONS)))
+
+ func = OPTIONS[choice][1]
+ ret = func()
+
+ print("Use the 'run-wizard' command to start this wizard again.")
+ return ret
+
+
+if __name__ == '__main__':
+ sys.exit(wizard())
diff --git a/testing/docker/desktop1604-test/buildprops.json b/testing/docker/desktop1604-test/buildprops.json
new file mode 100644
index 000000000..f0967b026
--- /dev/null
+++ b/testing/docker/desktop1604-test/buildprops.json
@@ -0,0 +1,8 @@
+{
+ "properties": {
+ "buildername": ""
+ },
+ "sourcestamp": {
+ "changes": []
+ }
+}
diff --git a/testing/docker/desktop1604-test/dot-files/config/pip/pip.conf b/testing/docker/desktop1604-test/dot-files/config/pip/pip.conf
new file mode 100644
index 000000000..73c2b2a52
--- /dev/null
+++ b/testing/docker/desktop1604-test/dot-files/config/pip/pip.conf
@@ -0,0 +1,2 @@
+[global]
+disable-pip-version-check = true
diff --git a/testing/docker/desktop1604-test/dot-files/config/user-dirs.dirs b/testing/docker/desktop1604-test/dot-files/config/user-dirs.dirs
new file mode 100644
index 000000000..2db2718d2
--- /dev/null
+++ b/testing/docker/desktop1604-test/dot-files/config/user-dirs.dirs
@@ -0,0 +1,15 @@
+# This file is written by xdg-user-dirs-update
+# If you want to change or add directories, just edit the line you're
+# interested in. All local changes will be retained on the next run
+# Format is XDG_xxx_DIR="$HOME/yyy", where yyy is a shell-escaped
+# homedir-relative path, or XDG_xxx_DIR="/yyy", where /yyy is an
+# absolute path. No other format is supported.
+
+XDG_DESKTOP_DIR="$HOME/Desktop"
+XDG_DOWNLOAD_DIR="$HOME/Downloads"
+XDG_TEMPLATES_DIR="$HOME/Templates"
+XDG_PUBLICSHARE_DIR="$HOME/Public"
+XDG_DOCUMENTS_DIR="$HOME/Documents"
+XDG_MUSIC_DIR="$HOME/Music"
+XDG_PICTURES_DIR="$HOME/Pictures"
+XDG_VIDEOS_DIR="$HOME/Videos"
diff --git a/testing/docker/desktop1604-test/dot-files/config/user-dirs.locale b/testing/docker/desktop1604-test/dot-files/config/user-dirs.locale
new file mode 100644
index 000000000..7741b83a3
--- /dev/null
+++ b/testing/docker/desktop1604-test/dot-files/config/user-dirs.locale
@@ -0,0 +1 @@
+en_US
diff --git a/testing/docker/desktop1604-test/dot-files/pulse/default.pa b/testing/docker/desktop1604-test/dot-files/pulse/default.pa
new file mode 100644
index 000000000..39bb44aa7
--- /dev/null
+++ b/testing/docker/desktop1604-test/dot-files/pulse/default.pa
@@ -0,0 +1,164 @@
+#!/usr/bin/pulseaudio -nF
+#
+# This file is part of PulseAudio.
+#
+# PulseAudio is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# PulseAudio is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with PulseAudio; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+# This startup script is used only if PulseAudio is started per-user
+# (i.e. not in system mode)
+
+.nofail
+
+### Load something into the sample cache
+#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav
+#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav
+#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav
+#load-sample-lazy pulse-access /usr/share/sounds/generic.wav
+
+.fail
+
+### Automatically restore the volume of streams and devices
+load-module module-device-restore
+load-module module-stream-restore
+load-module module-card-restore
+
+### Automatically augment property information from .desktop files
+### stored in /usr/share/application
+load-module module-augment-properties
+
+### Load audio drivers statically
+### (it's probably better to not load these drivers manually, but instead
+### use module-udev-detect -- see below -- for doing this automatically)
+#load-module module-alsa-sink
+#load-module module-alsa-source device=hw:1,0
+#load-module module-oss device="/dev/dsp" sink_name=output source_name=input
+#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input
+#load-module module-null-sink
+#load-module module-pipe-sink
+
+### Automatically load driver modules depending on the hardware available
+.ifexists module-udev-detect.so
+load-module module-udev-detect
+.else
+### Use the static hardware detection module (for systems that lack udev/hal support)
+load-module module-detect
+.endif
+
+### Automatically connect sink and source if JACK server is present
+.ifexists module-jackdbus-detect.so
+.nofail
+load-module module-jackdbus-detect
+.fail
+.endif
+
+### Automatically load driver modules for Bluetooth hardware
+# This module causes a pulseaudio startup failure on "gecko-tester"
+#.ifexists module-bluetooth-discover.so
+#load-module module-bluetooth-discover
+#.endif
+
+### Load several protocols
+.ifexists module-esound-protocol-unix.so
+load-module module-esound-protocol-unix
+.endif
+load-module module-native-protocol-unix
+
+### Network access (may be configured with paprefs, so leave this commented
+### here if you plan to use paprefs)
+#load-module module-esound-protocol-tcp
+#load-module module-native-protocol-tcp
+#load-module module-zeroconf-publish
+
+### Load the RTP receiver module (also configured via paprefs, see above)
+#load-module module-rtp-recv
+
+### Load the RTP sender module (also configured via paprefs, see above)
+#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'"
+#load-module module-rtp-send source=rtp.monitor
+
+### Load additional modules from GConf settings. This can be configured with the paprefs tool.
+### Please keep in mind that the modules configured by paprefs might conflict with manually
+### loaded modules.
+.ifexists module-gconf.so
+.nofail
+load-module module-gconf
+.fail
+.endif
+
+### Automatically restore the default sink/source when changed by the user
+### during runtime
+### NOTE: This should be loaded as early as possible so that subsequent modules
+### that look up the default sink/source get the right value
+load-module module-default-device-restore
+
+### Automatically move streams to the default sink if the sink they are
+### connected to dies, similar for sources
+load-module module-rescue-streams
+
+### Make sure we always have a sink around, even if it is a null sink.
+load-module module-always-sink
+
+### Honour intended role device property
+load-module module-intended-roles
+
+### Automatically suspend sinks/sources that become idle for too long
+load-module module-suspend-on-idle
+
+### If autoexit on idle is enabled we want to make sure we only quit
+### when no local session needs us anymore.
+# This module causes a pulseaudio startup failure on "gecko-tester"
+#.ifexists module-console-kit.so
+#load-module module-console-kit
+#.endif
+
+### Enable positioned event sounds
+load-module module-position-event-sounds
+
+### Cork music streams when a phone stream is active
+#load-module module-cork-music-on-phone
+
+### Modules to allow autoloading of filters (such as echo cancellation)
+### on demand. module-filter-heuristics tries to determine what filters
+### make sense, and module-filter-apply does the heavy-lifting of
+### loading modules and rerouting streams.
+load-module module-filter-heuristics
+load-module module-filter-apply
+
+### Load DBus protocol
+#.ifexists module-dbus-protocol.so
+#load-module module-dbus-protocol
+#.endif
+
+# X11 modules should not be started from default.pa so that one daemon
+# can be shared by multiple sessions.
+
+### Load X11 bell module
+#load-module module-x11-bell sample=bell-windowing-system
+
+### Register ourselves in the X11 session manager
+#load-module module-x11-xsmp
+
+### Publish connection data in the X11 root window
+#.ifexists module-x11-publish.so
+#.nofail
+#load-module module-x11-publish
+#.fail
+#.endif
+
+load-module module-switch-on-port-available
+
+### Make some devices default
+#set-default-sink output
+#set-default-source input
diff --git a/testing/docker/desktop1604-test/fonts.conf b/testing/docker/desktop1604-test/fonts.conf
new file mode 100644
index 000000000..9784fcc98
--- /dev/null
+++ b/testing/docker/desktop1604-test/fonts.conf
@@ -0,0 +1,5 @@
+<match target="font">
+ <edit name="antialias" mode="assign">
+ <bool>false</bool>
+ </edit>
+</match>
diff --git a/testing/docker/desktop1604-test/motd b/testing/docker/desktop1604-test/motd
new file mode 100644
index 000000000..f958393cd
--- /dev/null
+++ b/testing/docker/desktop1604-test/motd
@@ -0,0 +1,6 @@
+Welcome to your taskcluster interactive shell! The regularly scheduled task
+has been paused to give you a chance to set up your debugging environment.
+
+For your convenience, the exact mozharness command needed for this task can
+be invoked using the 'run-mozharness' command.
+
diff --git a/testing/docker/desktop1604-test/release-upgrades b/testing/docker/desktop1604-test/release-upgrades
new file mode 100644
index 000000000..d714f1d82
--- /dev/null
+++ b/testing/docker/desktop1604-test/release-upgrades
@@ -0,0 +1,17 @@
+# Default behavior for the release upgrader.
+
+[DEFAULT]
+# Default prompting behavior, valid options:
+#
+# never - Never check for a new release.
+# normal - Check to see if a new release is available. If more than one new
+# release is found, the release upgrader will attempt to upgrade to
+# the release that immediately succeeds the currently-running
+# release.
+# lts - Check to see if a new LTS release is available. The upgrader
+# will attempt to upgrade to the first LTS release available after
+# the currently-running one. Note that this option should not be
+# used if the currently-running release is not itself an LTS
+# release, since in that case the upgrader won't be able to
+# determine if a newer release is available.
+Prompt=never
diff --git a/testing/docker/desktop1604-test/taskcluster-interactive-shell b/testing/docker/desktop1604-test/taskcluster-interactive-shell
new file mode 100644
index 000000000..e569b40d3
--- /dev/null
+++ b/testing/docker/desktop1604-test/taskcluster-interactive-shell
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+/home/worker/bin/run-wizard;
+
+SPAWN="$SHELL";
+
+if [ "$SHELL" = "bash" ]; then
+ SPAWN="bash -li";
+fi;
+
+exec $SPAWN;
diff --git a/testing/docker/desktop1604-test/tc-vcs-config.yml b/testing/docker/desktop1604-test/tc-vcs-config.yml
new file mode 100644
index 000000000..25e13ee40
--- /dev/null
+++ b/testing/docker/desktop1604-test/tc-vcs-config.yml
@@ -0,0 +1,40 @@
+# Default configuration used by the tc-vcs tools; these values can be
+# overridden by passing the config you wish to use on the command line.
+git: git
+hg: hg
+
+repoCache:
+ # Repo url to clone when running repo init..
+ repoUrl: https://gerrit.googlesource.com/git-repo.git
+ # Version of repo to utilize...
+ repoRevision: master
+ # The root where all downloaded cache files are stored on the local machine...
+ cacheDir: '{{env.HOME}}/.tc-vcs-repo/'
+  # Name/prefix used as part of the base url.
+ cacheName: sources/{{name}}.tar.gz
+ # Command used to upload the tarball
+ uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'"
+  # Large http get requests are often slower using node's built-in http layer, so
+ # we utilize a subprocess which is responsible for fetching...
+ get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}}
+ # Used to create clone tarball
+ compress: tar -czf {{dest}} {{source}}
+  # All cache urls use tar + gz; this is the command used to extract those files
+ # downloaded by the "get" command.
+ extract: tar -x -z -C {{dest}} -f {{source}}
+
+cloneCache:
+ # The root where all downloaded cache files are stored on the local machine...
+ cacheDir: '{{env.HOME}}/.tc-vcs/'
+ # Command used to upload the tarball
+ uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'"
+  # Large http get requests are often slower using node's built-in http layer, so
+ # we utilize a subprocess which is responsible for fetching...
+ get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}}
+ # Used to create clone tarball
+ compress: tar -czf {{dest}} {{source}}
+  # All cache urls use tar + gz; this is the command used to extract those files
+ # downloaded by the "get" command.
+ extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}}
+  # Name/prefix used as part of the base url.
+ cacheName: clones/{{name}}.tar.gz
diff --git a/testing/docker/desktop1604-test/tester.env b/testing/docker/desktop1604-test/tester.env
new file mode 100644
index 000000000..1bcac6132
--- /dev/null
+++ b/testing/docker/desktop1604-test/tester.env
@@ -0,0 +1,4 @@
+GAIA_REV=tip
+GAIA_REF=tip
+GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
+GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
diff --git a/testing/docker/firefox-snap/Dockerfile b/testing/docker/firefox-snap/Dockerfile
new file mode 100644
index 000000000..584bd3e4f
--- /dev/null
+++ b/testing/docker/firefox-snap/Dockerfile
@@ -0,0 +1,3 @@
+FROM ubuntu:16.04
+
+RUN apt-get update && apt-get install -qy snapcraft bzip2 curl && apt-get clean
diff --git a/testing/docker/firefox-snap/Makefile b/testing/docker/firefox-snap/Makefile
new file mode 100644
index 000000000..d71dc7088
--- /dev/null
+++ b/testing/docker/firefox-snap/Makefile
@@ -0,0 +1,12 @@
+DOCKERIO_USERNAME =$(error DOCKERIO_USERNAME should be set)
+IMAGE_NAME = firefox-snapcraft
+FULL_IMAGE_NAME = $(DOCKERIO_USERNAME)/$(IMAGE_NAME)
+
+build:
+ docker build -t $(FULL_IMAGE_NAME) --no-cache --rm .
+
+push:
+ docker push $(FULL_IMAGE_NAME):latest
+
+pull:
+ docker pull $(FULL_IMAGE_NAME):latest
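Because DOCKERIO_USERNAME is assigned with recursive `=`, the $(error ...) fires only when the variable is actually expanded inside a target, so each target fails fast with a clear message unless a value is supplied. Typical usage (username illustrative):

    make build DOCKERIO_USERNAME=myuser
    make push DOCKERIO_USERNAME=myuser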
diff --git a/testing/docker/firefox-snap/distribution.ini b/testing/docker/firefox-snap/distribution.ini
new file mode 100644
index 000000000..ffa5f3dd5
--- /dev/null
+++ b/testing/docker/firefox-snap/distribution.ini
@@ -0,0 +1,9 @@
+[Global]
+id=mozilla-snap
+version=1.0
+about=Mozilla Firefox Snap
+
+[Preferences]
+app.update.enabled=false
+intl.locale.matchOS=true
+browser.shell.checkDefaultBrowser=false
diff --git a/testing/docker/firefox-snap/runme.sh b/testing/docker/firefox-snap/runme.sh
new file mode 100755
index 000000000..580ac2443
--- /dev/null
+++ b/testing/docker/firefox-snap/runme.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+
+set -xe
+
+# Required env variables
+test $VERSION
+test $BUILD_NUMBER
+test $CANDIDATES_DIR
+
+# Optional env variables
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+: ARTIFACTS_DIR ${ARTIFACTS_DIR:=/home/worker/artifacts}
+
+
+TARGET="firefox-${VERSION}.snap"
+
+mkdir -p "$ARTIFACTS_DIR"
+rm -rf "${WORKSPACE}/source" && mkdir -p "${WORKSPACE}/source/opt" "${WORKSPACE}/source/usr/bin"
+
+CURL="curl --location --retry 10 --retry-delay 10"
+
+# Download and extract en-US linux64 binary
+$CURL -o "${WORKSPACE}/firefox.tar.bz2" \
+ "${CANDIDATES_DIR}/${VERSION}-candidates/build${BUILD_NUMBER}/linux-x86_64/en-US/firefox-${VERSION}.tar.bz2"
+
+tar -C "${WORKSPACE}/source/opt" -xf "${WORKSPACE}/firefox.tar.bz2"
+mkdir -p "${WORKSPACE}/source/opt/firefox/distribution/extensions"
+cp -v distribution.ini "${WORKSPACE}/source/opt/firefox/distribution/"
+
+# Use release-specific list of locales to fetch L10N XPIs
+$CURL -o "${WORKSPACE}/l10n_changesets.txt" "${CANDIDATES_DIR}/${VERSION}-candidates/build${BUILD_NUMBER}/l10n_changesets.txt"
+cat "${WORKSPACE}/l10n_changesets.txt"
+
+for locale in $(grep -v ja-JP-mac "${WORKSPACE}/l10n_changesets.txt" | awk '{print $1}'); do
+ $CURL -o "${WORKSPACE}/source/opt/firefox/distribution/extensions/langpack-${locale}@firefox.mozilla.org.xpi" \
+ "$CANDIDATES_DIR/${VERSION}-candidates/build${BUILD_NUMBER}/linux-x86_64/xpi/${locale}.xpi"
+done
+
+# Symlink firefox binary to /usr/bin to make it available in PATH
+ln -s ../../opt/firefox/firefox "${WORKSPACE}/source/usr/bin"
+
+# Generate snapcraft manifest
+sed -e "s/@VERSION@/${VERSION}/g" -e "s/@BUILD_NUMBER@/${BUILD_NUMBER}/g" snapcraft.yaml.in > ${WORKSPACE}/snapcraft.yaml
+cd ${WORKSPACE}
+snapcraft
+
+mv *.snap "$ARTIFACTS_DIR/$TARGET"
+
+cd $ARTIFACTS_DIR
+
+# Generate checksums file
+size=$(stat --printf="%s" $ARTIFACTS_DIR/$TARGET)
+sha=$(sha512sum $ARTIFACTS_DIR/$TARGET | awk '{print $1}')
+echo "$sha sha512 $size $TARGET" > $TARGET.checksums
+
+echo "Generating signing manifest"
+hash=$(sha512sum $TARGET.checksums | awk '{print $1}')
+
+cat << EOF > signing_manifest.json
+[{"file_to_sign": "$TARGET.checksums", "hash": "$hash"}]
+EOF
+
+# For posterity
+find . -ls
+cat $TARGET.checksums
+cat signing_manifest.json
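For illustration, the two generated files would look roughly like this (sizes and hashes hypothetical):

    # firefox-57.0.snap.checksums
    3fb1...9c2 sha512 104857600 firefox-57.0.snap

    # signing_manifest.json
    [{"file_to_sign": "firefox-57.0.snap.checksums", "hash": "9ab4...f77"}]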
diff --git a/testing/docker/firefox-snap/snapcraft.yaml.in b/testing/docker/firefox-snap/snapcraft.yaml.in
new file mode 100644
index 000000000..9448c104d
--- /dev/null
+++ b/testing/docker/firefox-snap/snapcraft.yaml.in
@@ -0,0 +1,37 @@
+name: firefox
+version: @VERSION@-@BUILD_NUMBER@
+summary: Mozilla Firefox web browser
+description: Firefox is a powerful, extensible web browser with support for modern web application technologies.
+confinement: strict
+
+apps:
+ firefox:
+ command: desktop-launch firefox
+ plugs:
+ - unity7
+ - network
+ - home
+ - x11
+ - opengl
+ - pulseaudio
+ - gsettings
+ - camera
+ - browser-sandbox
+
+plugs:
+ browser-sandbox:
+ interface: browser-support
+ allow-sandbox: true
+
+parts:
+ firefox:
+ plugin: dump
+ source: source
+ stage-packages:
+ - libxt6
+ - libdbus-glib-1-2
+ - libasound2
+ - libpulse0
+ - libgl1-mesa-dri
+ - libgl1-mesa-glx
+ after: [desktop-gtk3]
diff --git a/testing/docker/funsize-balrog-submitter/Dockerfile b/testing/docker/funsize-balrog-submitter/Dockerfile
new file mode 100644
index 000000000..282f98b2c
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/Dockerfile
@@ -0,0 +1,35 @@
+FROM ubuntu:vivid
+MAINTAINER Rail Aliiev <rail@mozilla.com>
+
+# Required software
+ENV DEBIAN_FRONTEND noninteractive
+# Ubuntu Vivid has been moved to the old-releases repo
+RUN sed -i -e 's/archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
+# Chain apt-get commands with apt-get clean in a single docker RUN
+# to make sure that files are removed within a single docker layer
+RUN apt-get update -q && \
+ apt-get install -yyq --no-install-recommends \
+ python mercurial curl python-boto python-setuptools python-cryptography && \
+ apt-get clean
+
+COPY requirements.txt /tmp/
+# python-pip installs a lot of dependencies, increasing the size of the image
+# drastically.
+RUN easy_install pip
+RUN pip install -r /tmp/requirements.txt
+
+RUN hg clone https://hg.mozilla.org/build/tools /home/worker/tools
+
+RUN useradd -d /home/worker -s /bin/bash -m worker
+
+RUN mkdir /home/worker/bin
+COPY scripts/* /home/worker/bin/
+RUN mkdir /home/worker/keys
+COPY *.pubkey /home/worker/keys/
+COPY runme.sh /runme.sh
+RUN chmod 755 /home/worker/bin/* /runme.sh
+
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
diff --git a/testing/docker/funsize-balrog-submitter/Makefile b/testing/docker/funsize-balrog-submitter/Makefile
new file mode 100644
index 000000000..2e46ee493
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/Makefile
@@ -0,0 +1,17 @@
+DOCKERIO_USERNAME =$(error DOCKERIO_USERNAME should be set)
+IMAGE_NAME = funsize-balrog-submitter
+FULL_IMAGE_NAME = $(DOCKERIO_USERNAME)/$(IMAGE_NAME)
+
+build:
+ docker build -t $(FULL_IMAGE_NAME) --no-cache --rm .
+
+push:
+ docker push $(FULL_IMAGE_NAME):latest
+
+pull:
+ docker pull $(FULL_IMAGE_NAME):latest
+
+update_pubkeys:
+ curl https://hg.mozilla.org/mozilla-central/raw-file/default/toolkit/mozapps/update/updater/nightly_aurora_level3_primary.der | openssl x509 -inform DER -pubkey -noout > nightly.pubkey
+ curl https://hg.mozilla.org/mozilla-central/raw-file/default/toolkit/mozapps/update/updater/dep1.der | openssl x509 -inform DER -pubkey -noout > dep.pubkey
+ curl https://hg.mozilla.org/mozilla-central/raw-file/default/toolkit/mozapps/update/updater/release_primary.der | openssl x509 -inform DER -pubkey -noout > release.pubkey
diff --git a/testing/docker/funsize-balrog-submitter/dep.pubkey b/testing/docker/funsize-balrog-submitter/dep.pubkey
new file mode 100644
index 000000000..a1213a57e
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/dep.pubkey
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzjHSobdeiQ3JHP/cCIOp
+WaX9y12rL5mIo9OR9bpqEZdD0yXJJJeZA887Mv8slqsM+qObMUpKvfEE6zyYPIZJ
+ANib31neI5BBYHhfhf2f5EnkilSYlmU3Gx+uRsmsdt58PpYe124tOAGgca/8bUy3
+eb6kUUTwvMI0oWQuPkGUaoHVQyj/bBMTrIkyF3UbfFtiX/SfOPvIoabNUe+pQHUe
+pqC2+RxzDGj+shTq/hYhtXlptFzsEEb2+0foLy0MY8C30dP2QqbM2iavvr/P8OcS
+Gm3H0TQcRzIEBzvPcIjiZi1nQj/r/3TlYRNCjuYT/HsNLXrB/U5Tc990jjAUJxdH
+0wIDAQAB
+-----END PUBLIC KEY-----
diff --git a/testing/docker/funsize-balrog-submitter/nightly.pubkey b/testing/docker/funsize-balrog-submitter/nightly.pubkey
new file mode 100644
index 000000000..93c0904d5
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/nightly.pubkey
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4j/IS3gWbyVVnWn4ZRnC
+Fuzb6VAaHa0I+4E504ekhVAhbKlSfBstkLbXajdjUVAJpn02zWnOaTl5KAdpDpIp
+SkdA4mK20ej3/Ij7gIt8IwaX+ArXL8mP84pxDn5BgaNADm3206Z6YQzc/TDYu529
+qkDFmLqNUVRJAhPO+qqhKHIcVGh8HUHXN6XV1qOFip+UU0M474jAGgurVmAv8Rh7
+VvM0v5KmB6V6WHwM5gwjg2yRY/o+xYIsNeSes9rpp+MOs/RnUA6LI4WZGY4YahvX
+VclIXBDgbWPYtojexIJkmYj8JIIRsh3eCsrRRe14fq7cBurp3CxBYMlDHf0RUoaq
+hQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/testing/docker/funsize-balrog-submitter/release.pubkey b/testing/docker/funsize-balrog-submitter/release.pubkey
new file mode 100644
index 000000000..20df95946
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/release.pubkey
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvH4r94FpQ0gvr1hhTfV9
+NUeWPJ5CN6TZRq7v/Dc4nkJ1J4IP1B3UEii34tcNKpy1nKupiZuTT6T1zQYT+z5x
+3UkDF9qQboQ8RNb/BEz/cN3on/LTEnZ7YSraRL11M6cEB8mvmJxddCEquwqccRbs
+Usp8WUB7uRv1w6Anley7N9F/LE1iLPwJasZypRnzWb3aYsJy0cMFOYy+OXVdpktn
+qYqlNIjnt84u4Nil6UXnBbIJNUVOCY8wOFClNvVpubjPkWK1gtdWy3x/hJU5RpAO
+K9cnHxq4M/I4SUWTWO3r7yweQiHG4Jyoc7sP1jkwjBkSG93sDEycfwOdOoZft3wN
+sQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/testing/docker/funsize-balrog-submitter/requirements.txt b/testing/docker/funsize-balrog-submitter/requirements.txt
new file mode 100644
index 000000000..aec79b364
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/requirements.txt
@@ -0,0 +1 @@
+mar==1.2
diff --git a/testing/docker/funsize-balrog-submitter/runme.sh b/testing/docker/funsize-balrog-submitter/runme.sh
new file mode 100644
index 000000000..ecf222403
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/runme.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -xe
+
+test $PARENT_TASK_ARTIFACTS_URL_PREFIX
+test $BALROG_API_ROOT
+test $SIGNING_CERT
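+# Note: with set -e above, a bare `test` aborts the script if any of these
+# variables is empty or unset.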
+
+ARTIFACTS_DIR="/home/worker/artifacts"
+mkdir -p "$ARTIFACTS_DIR"
+
+curl --location --retry 10 --retry-delay 10 -o "$ARTIFACTS_DIR/manifest.json" \
+ "$PARENT_TASK_ARTIFACTS_URL_PREFIX/manifest.json"
+
+cat "$ARTIFACTS_DIR/manifest.json"
+python /home/worker/bin/funsize-balrog-submitter.py \
+ --artifacts-url-prefix "$PARENT_TASK_ARTIFACTS_URL_PREFIX" \
+ --manifest "$ARTIFACTS_DIR/manifest.json" \
+ -a "$BALROG_API_ROOT" \
+ --signing-cert "/home/worker/keys/${SIGNING_CERT}.pubkey" \
+ --verbose \
+ $EXTRA_BALROG_SUBMITTER_PARAMS
diff --git a/testing/docker/funsize-balrog-submitter/scripts/funsize-balrog-submitter.py b/testing/docker/funsize-balrog-submitter/scripts/funsize-balrog-submitter.py
new file mode 100644
index 000000000..17e713069
--- /dev/null
+++ b/testing/docker/funsize-balrog-submitter/scripts/funsize-balrog-submitter.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+import site
+import os
+import logging
+import argparse
+import json
+import hashlib
+import requests
+import tempfile
+from boto.s3.connection import S3Connection
+from mardor.marfile import MarFile
+
+site.addsitedir("/home/worker/tools/lib/python")
+
+from balrog.submitter.cli import NightlySubmitterV4, ReleaseSubmitterV4
+from util.retry import retry, retriable
+
+log = logging.getLogger(__name__)
+
+
+def get_hash(content, hash_type="md5"):
+ h = hashlib.new(hash_type)
+ h.update(content)
+ return h.hexdigest()
+
+
+@retriable()
+def download(url, dest, mode=None):
+ log.debug("Downloading %s to %s", url, dest)
+ r = requests.get(url)
+ r.raise_for_status()
+
+ bytes_downloaded = 0
+ with open(dest, 'wb') as fd:
+ for chunk in r.iter_content(4096):
+ fd.write(chunk)
+ bytes_downloaded += len(chunk)
+
+ log.debug('Downloaded %s bytes', bytes_downloaded)
+ if 'content-length' in r.headers:
+ log.debug('Content-Length: %s bytes', r.headers['content-length'])
+ if bytes_downloaded != int(r.headers['content-length']):
+ raise IOError('Unexpected number of bytes downloaded')
+
+ if mode:
+ log.debug("chmod %o %s", mode, dest)
+ os.chmod(dest, mode)
+
+
+def verify_signature(mar, signature):
+ log.info("Checking %s signature", mar)
+ m = MarFile(mar, signature_versions=[(1, signature)])
+ m.verify_signatures()
+
+
+def verify_copy_to_s3(bucket_name, aws_access_key_id, aws_secret_access_key,
+ mar_url, mar_dest, signing_cert):
+ conn = S3Connection(aws_access_key_id, aws_secret_access_key)
+ bucket = conn.get_bucket(bucket_name)
+ _, dest = tempfile.mkstemp()
+ log.info("Downloading %s to %s...", mar_url, dest)
+ download(mar_url, dest)
+ log.info("Verifying the signature...")
+ if not os.getenv("MOZ_DISABLE_MAR_CERT_VERIFICATION"):
+ verify_signature(dest, signing_cert)
+ for name in possible_names(mar_dest, 10):
+ log.info("Checking if %s already exists", name)
+ key = bucket.get_key(name)
+ if not key:
+ log.info("Uploading to %s...", name)
+ key = bucket.new_key(name)
+            # There is a chance of a race condition here. To avoid it we check
+            # the return value with replace=False; it should not be None.
+ length = key.set_contents_from_filename(dest, replace=False)
+ if length is None:
+ log.warn("Name race condition using %s, trying again...", name)
+ continue
+ else:
+ # key.make_public() may lead to race conditions, because
+ # it doesn't pass version_id, so it may not set permissions
+ bucket.set_canned_acl(acl_str='public-read', key_name=name,
+ version_id=key.version_id)
+ # Use explicit version_id to avoid using "latest" version
+ return key.generate_url(expires_in=0, query_auth=False,
+ version_id=key.version_id)
+ else:
+ if get_hash(key.get_contents_as_string()) == \
+ get_hash(open(dest).read()):
+ log.info("%s has the same MD5 checksum, not uploading...",
+ name)
+ return key.generate_url(expires_in=0, query_auth=False,
+ version_id=key.version_id)
+ log.info("%s already exists with different checksum, "
+ "trying another one...", name)
+
+ raise RuntimeError("Cannot generate a unique name for %s", mar_dest)
+
+
+def possible_names(initial_name, amount):
+ """Generate names appending counter before extension"""
+ prefix, ext = os.path.splitext(initial_name)
+ return [initial_name] + ["{}-{}{}".format(prefix, n, ext) for n in
+ range(1, amount + 1)]
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--artifacts-url-prefix", required=True,
+ help="URL prefix for MAR")
+ parser.add_argument("--manifest", required=True)
+ parser.add_argument("-a", "--api-root", required=True,
+ help="Balrog API root")
+ parser.add_argument("-d", "--dummy", action="store_true",
+ help="Add '-dummy' suffix to branch name")
+ parser.add_argument("--signing-cert", required=True)
+ parser.add_argument("-v", "--verbose", action="store_const",
+ dest="loglevel", const=logging.DEBUG,
+ default=logging.INFO)
+ parser.add_argument("--product", help="Override product name from application.ini")
+ args = parser.parse_args()
+ logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
+ level=args.loglevel)
+ logging.getLogger("requests").setLevel(logging.WARNING)
+ logging.getLogger("boto").setLevel(logging.WARNING)
+
+ balrog_username = os.environ.get("BALROG_USERNAME")
+ balrog_password = os.environ.get("BALROG_PASSWORD")
+    if not balrog_username or not balrog_password:
+ raise RuntimeError("BALROG_USERNAME and BALROG_PASSWORD environment "
+ "variables should be set")
+
+ s3_bucket = os.environ.get("S3_BUCKET")
+ aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
+ aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY")
+ if not (s3_bucket and aws_access_key_id and aws_secret_access_key):
+ log.warn("Skipping S3 uploads...")
+ uploads_enabled = False
+ else:
+ uploads_enabled = True
+
+ manifest = json.load(open(args.manifest))
+ auth = (balrog_username, balrog_password)
+
+ for e in manifest:
+ complete_info = [{
+ "hash": e["to_hash"],
+ "size": e["to_size"],
+ }]
+ partial_info = [{
+ "hash": e["hash"],
+ "size": e["size"],
+ }]
+
+ if "previousVersion" in e and "previousBuildNumber" in e:
+ log.info("Release style balrog submission")
+ partial_info[0]["previousVersion"] = e["previousVersion"]
+ partial_info[0]["previousBuildNumber"] = e["previousBuildNumber"]
+ submitter = ReleaseSubmitterV4(api_root=args.api_root, auth=auth,
+ dummy=args.dummy)
+ productName = args.product or e["appName"]
+ retry(lambda: submitter.run(
+ platform=e["platform"], productName=productName,
+ version=e["toVersion"],
+ build_number=e["toBuildNumber"],
+ appVersion=e["version"], extVersion=e["version"],
+ buildID=e["to_buildid"], locale=e["locale"],
+ hashFunction='sha512',
+ partialInfo=partial_info, completeInfo=complete_info,
+ ))
+ elif "from_buildid" in e and uploads_enabled:
+ log.info("Nightly style balrog submission")
+ partial_mar_url = "{}/{}".format(args.artifacts_url_prefix,
+ e["mar"])
+ complete_mar_url = e["to_mar"]
+ dest_prefix = "{branch}/{buildid}".format(
+ branch=e["branch"], buildid=e["to_buildid"])
+ partial_mar_dest = "{}/{}".format(dest_prefix, e["mar"])
+ complete_mar_filename = "{appName}-{branch}-{version}-" \
+ "{platform}-{locale}.complete.mar"
+ complete_mar_filename = complete_mar_filename.format(
+ appName=e["appName"], branch=e["branch"],
+ version=e["version"], platform=e["platform"],
+ locale=e["locale"]
+ )
+ complete_mar_dest = "{}/{}".format(dest_prefix,
+ complete_mar_filename)
+ partial_info[0]["url"] = verify_copy_to_s3(
+ s3_bucket, aws_access_key_id, aws_secret_access_key,
+ partial_mar_url, partial_mar_dest, args.signing_cert)
+ complete_info[0]["url"] = verify_copy_to_s3(
+ s3_bucket, aws_access_key_id, aws_secret_access_key,
+ complete_mar_url, complete_mar_dest, args.signing_cert)
+ partial_info[0]["from_buildid"] = e["from_buildid"]
+ submitter = NightlySubmitterV4(api_root=args.api_root, auth=auth,
+ dummy=args.dummy)
+ productName = args.product or e["appName"]
+ retry(lambda: submitter.run(
+ platform=e["platform"], buildID=e["to_buildid"],
+ productName=productName, branch=e["branch"],
+ appVersion=e["version"], locale=e["locale"],
+ hashFunction='sha512', extVersion=e["version"],
+ partialInfo=partial_info, completeInfo=complete_info),
+ attempts=30, sleeptime=10, max_sleeptime=60,
+ )
+ else:
+ raise RuntimeError("Cannot determine Balrog submission style")
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/docker/funsize-update-generator/Dockerfile b/testing/docker/funsize-update-generator/Dockerfile
new file mode 100644
index 000000000..afa8290b1
--- /dev/null
+++ b/testing/docker/funsize-update-generator/Dockerfile
@@ -0,0 +1,35 @@
+FROM ubuntu:vivid
+MAINTAINER Rail Aliiev <rail@mozilla.com>
+
+# Required software
+ENV DEBIAN_FRONTEND noninteractive
+# Ubuntu Vivid has been moved to the old-releases repo
+RUN sed -i -e 's/archive.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
+# Chain apt-get commands with apt-get clean in a single docker RUN
+# to make sure that files are removed within a single docker layer
+RUN apt-get update -q && \
+ apt-get install -yyq --no-install-recommends \
+ python python-setuptools python-cryptography libgetopt-simple-perl \
+ bzip2 clamav clamav-freshclam python-requests python-sh curl && \
+ apt-get clean
+RUN useradd -d /home/worker -s /bin/bash -m worker
+COPY requirements.txt /tmp/
+# python-pip installs a lot of dependencies, increasing the size of the image
+# drastically. Using easy_install saves us almost 200M.
+RUN easy_install pip
+RUN pip install -r /tmp/requirements.txt
+
+# scripts
+RUN mkdir /home/worker/bin
+COPY scripts/* /home/worker/bin/
+COPY runme.sh /runme.sh
+RUN chmod 755 /home/worker/bin/* /runme.sh
+RUN mkdir /home/worker/keys
+COPY *.pubkey /home/worker/keys/
+# Freshclam may be flaky, retry if it fails
+RUN for i in 1 2 3 4 5; do freshclam --verbose && break || sleep 15; done
+
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
diff --git a/testing/docker/funsize-update-generator/Makefile b/testing/docker/funsize-update-generator/Makefile
new file mode 100644
index 000000000..ad96cfbf1
--- /dev/null
+++ b/testing/docker/funsize-update-generator/Makefile
@@ -0,0 +1,17 @@
+DOCKERIO_USERNAME =$(error DOCKERIO_USERNAME should be set)
+IMAGE_NAME = funsize-update-generator
+FULL_IMAGE_NAME = $(DOCKERIO_USERNAME)/$(IMAGE_NAME)
+
+build:
+ docker build -t $(FULL_IMAGE_NAME) --no-cache --rm .
+
+push:
+ docker push $(FULL_IMAGE_NAME):latest
+
+pull:
+ docker pull $(FULL_IMAGE_NAME):latest
+
+update_pubkeys:
+ curl https://hg.mozilla.org/mozilla-central/raw-file/default/toolkit/mozapps/update/updater/nightly_aurora_level3_primary.der | openssl x509 -inform DER -pubkey -noout > nightly.pubkey
+ curl https://hg.mozilla.org/mozilla-central/raw-file/default/toolkit/mozapps/update/updater/dep1.der | openssl x509 -inform DER -pubkey -noout > dep.pubkey
+ curl https://hg.mozilla.org/mozilla-central/raw-file/default/toolkit/mozapps/update/updater/release_primary.der | openssl x509 -inform DER -pubkey -noout > release.pubkey
diff --git a/testing/docker/funsize-update-generator/dep.pubkey b/testing/docker/funsize-update-generator/dep.pubkey
new file mode 100644
index 000000000..a1213a57e
--- /dev/null
+++ b/testing/docker/funsize-update-generator/dep.pubkey
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzjHSobdeiQ3JHP/cCIOp
+WaX9y12rL5mIo9OR9bpqEZdD0yXJJJeZA887Mv8slqsM+qObMUpKvfEE6zyYPIZJ
+ANib31neI5BBYHhfhf2f5EnkilSYlmU3Gx+uRsmsdt58PpYe124tOAGgca/8bUy3
+eb6kUUTwvMI0oWQuPkGUaoHVQyj/bBMTrIkyF3UbfFtiX/SfOPvIoabNUe+pQHUe
+pqC2+RxzDGj+shTq/hYhtXlptFzsEEb2+0foLy0MY8C30dP2QqbM2iavvr/P8OcS
+Gm3H0TQcRzIEBzvPcIjiZi1nQj/r/3TlYRNCjuYT/HsNLXrB/U5Tc990jjAUJxdH
+0wIDAQAB
+-----END PUBLIC KEY-----
diff --git a/testing/docker/funsize-update-generator/nightly.pubkey b/testing/docker/funsize-update-generator/nightly.pubkey
new file mode 100644
index 000000000..93c0904d5
--- /dev/null
+++ b/testing/docker/funsize-update-generator/nightly.pubkey
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4j/IS3gWbyVVnWn4ZRnC
+Fuzb6VAaHa0I+4E504ekhVAhbKlSfBstkLbXajdjUVAJpn02zWnOaTl5KAdpDpIp
+SkdA4mK20ej3/Ij7gIt8IwaX+ArXL8mP84pxDn5BgaNADm3206Z6YQzc/TDYu529
+qkDFmLqNUVRJAhPO+qqhKHIcVGh8HUHXN6XV1qOFip+UU0M474jAGgurVmAv8Rh7
+VvM0v5KmB6V6WHwM5gwjg2yRY/o+xYIsNeSes9rpp+MOs/RnUA6LI4WZGY4YahvX
+VclIXBDgbWPYtojexIJkmYj8JIIRsh3eCsrRRe14fq7cBurp3CxBYMlDHf0RUoaq
+hQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/testing/docker/funsize-update-generator/release.pubkey b/testing/docker/funsize-update-generator/release.pubkey
new file mode 100644
index 000000000..20df95946
--- /dev/null
+++ b/testing/docker/funsize-update-generator/release.pubkey
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvH4r94FpQ0gvr1hhTfV9
+NUeWPJ5CN6TZRq7v/Dc4nkJ1J4IP1B3UEii34tcNKpy1nKupiZuTT6T1zQYT+z5x
+3UkDF9qQboQ8RNb/BEz/cN3on/LTEnZ7YSraRL11M6cEB8mvmJxddCEquwqccRbs
+Usp8WUB7uRv1w6Anley7N9F/LE1iLPwJasZypRnzWb3aYsJy0cMFOYy+OXVdpktn
+qYqlNIjnt84u4Nil6UXnBbIJNUVOCY8wOFClNvVpubjPkWK1gtdWy3x/hJU5RpAO
+K9cnHxq4M/I4SUWTWO3r7yweQiHG4Jyoc7sP1jkwjBkSG93sDEycfwOdOoZft3wN
+sQIDAQAB
+-----END PUBLIC KEY-----
diff --git a/testing/docker/funsize-update-generator/requirements.txt b/testing/docker/funsize-update-generator/requirements.txt
new file mode 100644
index 000000000..58a2d60b7
--- /dev/null
+++ b/testing/docker/funsize-update-generator/requirements.txt
@@ -0,0 +1,2 @@
+mar==1.2
+redo
diff --git a/testing/docker/funsize-update-generator/runme.sh b/testing/docker/funsize-update-generator/runme.sh
new file mode 100644
index 000000000..92094a76e
--- /dev/null
+++ b/testing/docker/funsize-update-generator/runme.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+set -xe
+
+test $TASK_ID
+test $SIGNING_CERT
+
+ARTIFACTS_DIR="/home/worker/artifacts"
+mkdir -p "$ARTIFACTS_DIR"
+
+curl --location --retry 10 --retry-delay 10 -o /home/worker/task.json \
+ "https://queue.taskcluster.net/v1/task/$TASK_ID"
+
+# enable local cache
+export MBSDIFF_HOOK="/home/worker/bin/mbsdiff_hook.sh -c /tmp/fs-cache"
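+# (the hook's -g flag checks the cache for an existing patch before one is
+# generated, and -u uploads a fresh patch; see scripts/mbsdiff_hook.sh)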
+
+if [ -n "$FILENAME_TEMPLATE" ]; then
+ EXTRA_PARAMS="--filename-template $FILENAME_TEMPLATE $EXTRA_PARAMS"
+fi
+
+/home/worker/bin/funsize.py \
+ --artifacts-dir "$ARTIFACTS_DIR" \
+ --task-definition /home/worker/task.json \
+ --signing-cert "/home/worker/keys/${SIGNING_CERT}.pubkey" \
+ $EXTRA_PARAMS
diff --git a/testing/docker/funsize-update-generator/scripts/funsize.py b/testing/docker/funsize-update-generator/scripts/funsize.py
new file mode 100755
index 000000000..fd591817c
--- /dev/null
+++ b/testing/docker/funsize-update-generator/scripts/funsize.py
@@ -0,0 +1,275 @@
+#!/usr/bin/env python
+
+import ConfigParser
+import argparse
+import functools
+import hashlib
+import json
+import logging
+import os
+import shutil
+import tempfile
+import requests
+import sh
+
+import redo
+from mardor.marfile import MarFile
+
+log = logging.getLogger(__name__)
+ALLOWED_URL_PREFIXES = [
+ "http://download.cdn.mozilla.net/pub/mozilla.org/firefox/nightly/",
+ "http://download.cdn.mozilla.net/pub/firefox/nightly/",
+ "https://mozilla-nightly-updates.s3.amazonaws.com",
+ "https://queue.taskcluster.net/",
+ "http://ftp.mozilla.org/",
+ "http://download.mozilla.org/",
+ "https://archive.mozilla.org/",
+]
+
+DEFAULT_FILENAME_TEMPLATE = "{appName}-{branch}-{version}-{platform}-" \
+ "{locale}-{from_buildid}-{to_buildid}.partial.mar"
+
+
+def verify_signature(mar, signature):
+ log.info("Checking %s signature", mar)
+ m = MarFile(mar, signature_versions=[(1, signature)])
+ m.verify_signatures()
+
+
+@redo.retriable()
+def download(url, dest, mode=None):
+ log.debug("Downloading %s to %s", url, dest)
+ r = requests.get(url)
+ r.raise_for_status()
+
+ bytes_downloaded = 0
+ with open(dest, 'wb') as fd:
+ for chunk in r.iter_content(4096):
+ fd.write(chunk)
+ bytes_downloaded += len(chunk)
+
+ log.debug('Downloaded %s bytes', bytes_downloaded)
+ if 'content-length' in r.headers:
+ log.debug('Content-Length: %s bytes', r.headers['content-length'])
+ if bytes_downloaded != int(r.headers['content-length']):
+ raise IOError('Unexpected number of bytes downloaded')
+
+ if mode:
+ log.debug("chmod %o %s", mode, dest)
+ os.chmod(dest, mode)
+
+
+def unpack(work_env, mar, dest_dir):
+ os.mkdir(dest_dir)
+ unwrap_cmd = sh.Command(os.path.join(work_env.workdir,
+ "unwrap_full_update.pl"))
+ log.debug("Unwrapping %s", mar)
+ out = unwrap_cmd(mar, _cwd=dest_dir, _env=work_env.env, _timeout=240,
+ _err_to_out=True)
+ if out:
+ log.debug(out)
+
+
+def find_file(directory, filename):
+ log.debug("Searching for %s in %s", filename, directory)
+ for root, dirs, files in os.walk(directory):
+ if filename in files:
+ f = os.path.join(root, filename)
+ log.debug("Found %s", f)
+ return f
+
+
+def get_option(directory, filename, section, option):
+ log.debug("Exctracting [%s]: %s from %s/**/%s", section, option, directory,
+ filename)
+ f = find_file(directory, filename)
+ config = ConfigParser.ConfigParser()
+ config.read(f)
+ rv = config.get(section, option)
+ log.debug("Found %s", rv)
+ return rv
+
+
+def generate_partial(work_env, from_dir, to_dir, dest_mar, channel_ids,
+ version):
+ log.debug("Generating partial %s", dest_mar)
+ env = work_env.env
+ env["MOZ_PRODUCT_VERSION"] = version
+ env["MOZ_CHANNEL_ID"] = channel_ids
+ make_incremental_update = os.path.join(work_env.workdir,
+ "make_incremental_update.sh")
+ out = sh.bash(make_incremental_update, dest_mar, from_dir, to_dir,
+ _cwd=work_env.workdir, _env=env, _timeout=900,
+ _err_to_out=True)
+ if out:
+ log.debug(out)
+
+
+def get_hash(path, hash_type="sha512"):
+ h = hashlib.new(hash_type)
+ with open(path, "rb") as f:
+ for chunk in iter(functools.partial(f.read, 4096), ''):
+ h.update(chunk)
+ return h.hexdigest()
+
+
+class WorkEnv(object):
+
+ def __init__(self):
+ self.workdir = tempfile.mkdtemp()
+
+ def setup(self):
+ self.download_unwrap()
+ self.download_martools()
+
+ def download_unwrap(self):
+ # unwrap_full_update.pl is not too sensitive to the revision
+ url = "https://hg.mozilla.org/mozilla-central/raw-file/default/" \
+ "tools/update-packaging/unwrap_full_update.pl"
+ download(url, dest=os.path.join(self.workdir, "unwrap_full_update.pl"),
+ mode=0o755)
+
+ def download_buildsystem_bits(self, repo, revision):
+ prefix = "{repo}/raw-file/{revision}/tools/update-packaging"
+ prefix = prefix.format(repo=repo, revision=revision)
+ for f in ("make_incremental_update.sh", "common.sh"):
+ url = "{prefix}/{f}".format(prefix=prefix, f=f)
+ download(url, dest=os.path.join(self.workdir, f), mode=0o755)
+
+ def download_martools(self):
+ # TODO: check if the tools have to be branch specific
+ prefix = "https://ftp.mozilla.org/pub/mozilla.org/firefox/nightly/" \
+ "latest-mozilla-central/mar-tools/linux64"
+ for f in ("mar", "mbsdiff"):
+ url = "{prefix}/{f}".format(prefix=prefix, f=f)
+ download(url, dest=os.path.join(self.workdir, f), mode=0o755)
+
+ def cleanup(self):
+ shutil.rmtree(self.workdir)
+
+ @property
+ def env(self):
+ my_env = os.environ.copy()
+ my_env['LC_ALL'] = 'C'
+ my_env['MAR'] = os.path.join(self.workdir, "mar")
+ my_env['MBSDIFF'] = os.path.join(self.workdir, "mbsdiff")
+ return my_env
+
+
+def verify_allowed_url(mar):
+ if not any(mar.startswith(prefix) for prefix in ALLOWED_URL_PREFIXES):
+ raise ValueError("{mar} is not in allowed URL prefixes: {p}".format(
+ mar=mar, p=ALLOWED_URL_PREFIXES
+ ))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--artifacts-dir", required=True)
+ parser.add_argument("--signing-cert", required=True)
+ parser.add_argument("--task-definition", required=True,
+ type=argparse.FileType('r'))
+ parser.add_argument("--filename-template",
+ default=DEFAULT_FILENAME_TEMPLATE)
+ parser.add_argument("--no-freshclam", action="store_true", default=False,
+ help="Do not refresh ClamAV DB")
+ parser.add_argument("-q", "--quiet", dest="log_level",
+ action="store_const", const=logging.WARNING,
+ default=logging.DEBUG)
+ args = parser.parse_args()
+
+ logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s",
+ level=args.log_level)
+ task = json.load(args.task_definition)
+ # TODO: verify task["extra"]["funsize"]["partials"] with jsonschema
+
+ if args.no_freshclam:
+ log.info("Skipping freshclam")
+ else:
+ log.info("Refreshing clamav db...")
+ try:
+ redo.retry(lambda: sh.freshclam("--stdout", "--verbose",
+ _timeout=300, _err_to_out=True))
+ log.info("Done.")
+ except sh.ErrorReturnCode:
+ log.warning("Freshclam failed, skipping DB update")
+ manifest = []
+ for e in task["extra"]["funsize"]["partials"]:
+ for mar in (e["from_mar"], e["to_mar"]):
+ verify_allowed_url(mar)
+
+ work_env = WorkEnv()
+ # TODO: run setup once
+ work_env.setup()
+ complete_mars = {}
+ for mar_type, f in (("from", e["from_mar"]), ("to", e["to_mar"])):
+ dest = os.path.join(work_env.workdir, "{}.mar".format(mar_type))
+ unpack_dir = os.path.join(work_env.workdir, mar_type)
+ download(f, dest)
+ if not os.getenv("MOZ_DISABLE_MAR_CERT_VERIFICATION"):
+ verify_signature(dest, args.signing_cert)
+ complete_mars["%s_size" % mar_type] = os.path.getsize(dest)
+ complete_mars["%s_hash" % mar_type] = get_hash(dest)
+ unpack(work_env, dest, unpack_dir)
+ log.info("AV-scanning %s ...", unpack_dir)
+ sh.clamscan("-r", unpack_dir, _timeout=600, _err_to_out=True)
+ log.info("Done.")
+
+ path = os.path.join(work_env.workdir, "to")
+ from_path = os.path.join(work_env.workdir, "from")
+ mar_data = {
+ "ACCEPTED_MAR_CHANNEL_IDS": get_option(
+ path, filename="update-settings.ini", section="Settings",
+ option="ACCEPTED_MAR_CHANNEL_IDS"),
+ "version": get_option(path, filename="application.ini",
+ section="App", option="Version"),
+ "to_buildid": get_option(path, filename="application.ini",
+ section="App", option="BuildID"),
+ "from_buildid": get_option(from_path, filename="application.ini",
+ section="App", option="BuildID"),
+ "appName": get_option(from_path, filename="application.ini",
+ section="App", option="Name"),
+ # Use Gecko repo and rev from platform.ini, not application.ini
+ "repo": get_option(path, filename="platform.ini", section="Build",
+ option="SourceRepository"),
+ "revision": get_option(path, filename="platform.ini",
+ section="Build", option="SourceStamp"),
+ "from_mar": e["from_mar"],
+ "to_mar": e["to_mar"],
+ "platform": e["platform"],
+ "locale": e["locale"],
+ }
+ # Override ACCEPTED_MAR_CHANNEL_IDS if needed
+ if "ACCEPTED_MAR_CHANNEL_IDS" in os.environ:
+ mar_data["ACCEPTED_MAR_CHANNEL_IDS"] = os.environ["ACCEPTED_MAR_CHANNEL_IDS"]
+ for field in ("update_number", "previousVersion",
+ "previousBuildNumber", "toVersion",
+ "toBuildNumber"):
+ if field in e:
+ mar_data[field] = e[field]
+ mar_data.update(complete_mars)
+        # If branch is not set explicitly, use the repo name
+ mar_data["branch"] = e.get("branch",
+ mar_data["repo"].rstrip("/").split("/")[-1])
+ mar_name = args.filename_template.format(**mar_data)
+ mar_data["mar"] = mar_name
+ dest_mar = os.path.join(work_env.workdir, mar_name)
+ # TODO: download these once
+ work_env.download_buildsystem_bits(repo=mar_data["repo"],
+ revision=mar_data["revision"])
+ generate_partial(work_env, from_path, path, dest_mar,
+ mar_data["ACCEPTED_MAR_CHANNEL_IDS"],
+ mar_data["version"])
+ mar_data["size"] = os.path.getsize(dest_mar)
+ mar_data["hash"] = get_hash(dest_mar)
+
+ shutil.copy(dest_mar, args.artifacts_dir)
+ work_env.cleanup()
+ manifest.append(mar_data)
+ manifest_file = os.path.join(args.artifacts_dir, "manifest.json")
+ with open(manifest_file, "w") as fp:
+ json.dump(manifest, fp, indent=2, sort_keys=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/docker/funsize-update-generator/scripts/mbsdiff_hook.sh b/testing/docker/funsize-update-generator/scripts/mbsdiff_hook.sh
new file mode 100755
index 000000000..0b677a5e9
--- /dev/null
+++ b/testing/docker/funsize-update-generator/scripts/mbsdiff_hook.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#
+# This tool contains functions that are to be used to handle/enable funsize
+# Author: Mihai Tabara
+#
+
+HOOK=
+SERVER_URL=
+LOCAL_CACHE_DIR=
+
+getsha512(){
+ echo "$(openssl sha512 "${1}" | awk '{print $2}')"
+}
+
+print_usage(){
+ echo "$(basename $0) -A SERVER-URL [-c LOCAL-CACHE-DIR-PATH] [-g] [-u] PATH-FROM-URL PATH-TO-URL PATH-PATCH"
+ echo "Script that saves/retrieves from cache presumptive patches as args"
+ echo ""
+ echo "-A SERVER-URL - host where to send the files"
+ echo "-c LOCAL-CACHE-DIR-PATH local path to which patches are cached"
+ echo "-g pre hook - tests whether patch already in cache"
+ echo "-u post hook - upload patch to cache for future use"
+ echo ""
+ echo "PATH-FROM-URL : path on disk for source file"
+ echo "PATH-TO-URL : path on disk for destination file"
+ echo "PATH-PATCH : path on disk for patch between source and destination"
+}
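+# Example invocation, as wired up via MBSDIFF_HOOK in runme.sh (paths are
+# illustrative):
+#   mbsdiff_hook.sh -c /tmp/fs-cache -g "$from_path" "$to_path" "$patch_path"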
+
+upload_patch(){
+ sha_from=`getsha512 "$1"`
+ sha_to=`getsha512 "$2"`
+ patch_path="$3"
+
+ # save to local cache first
+ if [ -n "$LOCAL_CACHE_DIR" ]; then
+ local_cmd="mkdir -p "$LOCAL_CACHE_DIR/$sha_from""
+ if `$local_cmd` >&2; then
+ cp -avf "$patch_path" "$LOCAL_CACHE_DIR/$sha_from/$sha_to"
+ echo "$patch_path saved on local cache!"
+ fi
+ fi
+    # The remote cache implementation is not used; the code below is kept
+    # for reference only.
+ return 0
+
+ # send it over to funsize
+ cmd="curl -sSw %{http_code} -o /dev/null -X POST $SERVER_URL -F sha_from="$sha_from" -F sha_to="$sha_to" -F patch_file="@$patch_path""
+ ret_code=`$cmd`
+
+ if [ $ret_code -eq 200 ]; then
+ echo "$patch_path Successful uploaded to funsize!"
+ return 0
+ fi
+
+ echo "$patch_path Failed to be uploaded to funsize!"
+ return 1
+}
+
+get_patch(){
+ sha_from=`getsha512 "$1"`
+ sha_to=`getsha512 "$2"`
+ destination_file="$3"
+ tmp_file="$destination_file.tmp"
+
+ # try to retrieve from local cache first
+ if [ -r "$LOCAL_CACHE_DIR/$sha_from/$sha_to" ]; then
+ cp -avf "$LOCAL_CACHE_DIR/$sha_from/$sha_to" "$destination_file"
+ echo "Successful retrieved $destination_file from local cache!"
+ return 0
+ else
+ echo "File is not in the locale cache"
+ return 1
+ fi
+    # The remote cache implementation is not used; the code below is kept
+    # for reference only.
+
+ # if unsuccessful, try to retrieve from funsize
+ cmd="curl -LsSGw %{http_code} $SERVER_URL/$sha_from/$sha_to -o $tmp_file"
+ ret_code=`$cmd`
+
+ if [ $ret_code -eq 200 ]; then
+ mv "$tmp_file" "$destination_file"
+ echo "Successful retrieved $destination_file from funsize!"
+ return 0
+ fi
+
+ rm -f "$tmp_file"
+ echo "Failed to retrieve $destination_file from funsize!"
+ return 1
+}
+
+OPTIND=1
+
+while getopts ":A:c:gu" option; do
+ case $option in
+ A)
+ SERVER_URL="$OPTARG"
+ ;;
+ c)
+ LOCAL_CACHE_DIR="$OPTARG"
+ ;;
+ g)
+ HOOK="PRE"
+ ;;
+ u)
+ HOOK="POST"
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ print_usage
+ exit 1
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument." >&2
+ print_usage
+ exit 1
+ ;;
+ *)
+ echo "Unimplemented option: -$OPTARG" >&2
+ print_usage
+ exit 1
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+if [ "$HOOK" == "PRE" ]; then
+ get_patch "$1" "$2" "$3"
+elif [ "$HOOK" == "POST" ]; then
+ upload_patch "$1" "$2" "$3"
+fi
diff --git a/testing/docker/image_builder/Dockerfile b/testing/docker/image_builder/Dockerfile
new file mode 100644
index 000000000..9acbafaab
--- /dev/null
+++ b/testing/docker/image_builder/Dockerfile
@@ -0,0 +1,40 @@
+FROM ubuntu:16.04
+
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /setup/tooltool.py
+
+# %include testing/docker/recipes/common.sh
+ADD topsrcdir/testing/docker/recipes/common.sh /setup/common.sh
+
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /setup/install-mercurial.sh
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /usr/local/bin/run-task
+
+# Add and run setup script
+ADD build-image.sh /usr/local/bin/build-image.sh
+ADD setup.sh /setup/setup.sh
+RUN bash /setup/setup.sh
+
+# Set up a workspace that won't use AUFS
+VOLUME /home/worker/workspace
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+ENV HOSTNAME taskcluster-worker
+ENV LC_ALL C
+
+# Create worker user
+RUN useradd -d /home/worker -s /bin/bash -m worker
+
+# Set some sane defaults
+WORKDIR /home/worker/
+CMD build-image.sh
diff --git a/testing/docker/image_builder/REGISTRY b/testing/docker/image_builder/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/image_builder/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/image_builder/VERSION b/testing/docker/image_builder/VERSION
new file mode 100644
index 000000000..3eefcb9dd
--- /dev/null
+++ b/testing/docker/image_builder/VERSION
@@ -0,0 +1 @@
+1.0.0
diff --git a/testing/docker/image_builder/build-image.sh b/testing/docker/image_builder/build-image.sh
new file mode 100755
index 000000000..25e0d6a28
--- /dev/null
+++ b/testing/docker/image_builder/build-image.sh
@@ -0,0 +1,59 @@
+#!/bin/bash -vex
+
+# Set bash options to exit immediately if a pipeline exits non-zero, print a
+# trace of commands, and make output verbose (print shell input as it's read)
+# See https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
+set -x -e -v
+
+# Prefix errors with taskcluster error prefix so that they are parsed by Treeherder
+raise_error() {
+ echo
+ echo "[taskcluster-image-build:error] $1"
+ exit 1
+}
+
+# Ensure that the PROJECT is specified so the image can be indexed
+test -n "$PROJECT" || raise_error "PROJECT must be provided."
+test -n "$HASH" || raise_error "Context HASH must be provided."
+test -n "$IMAGE_NAME" || raise_error "IMAGE_NAME must be provided."
+
+# Create artifact folder
+mkdir -p /home/worker/workspace/artifacts
+
+# Construct a CONTEXT_FILE
+CONTEXT_FILE=/home/worker/workspace/context.tar
+
+# Run ./mach taskcluster-build-image with --context-only to build context
+run-task \
+ --chown-recursive "/home/worker/workspace" \
+ --vcs-checkout "/home/worker/checkouts/gecko" \
+ -- \
+ /home/worker/checkouts/gecko/mach taskcluster-build-image \
+ --context-only "$CONTEXT_FILE" \
+ "$IMAGE_NAME"
+test -f "$CONTEXT_FILE" || raise_error "Context file wasn't created"
+
+# Post context tar-ball to docker daemon
+# This interacts directly with the docker remote API, see:
+# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.18/
+curl -s \
+ -X POST \
+ --header 'Content-Type: application/tar' \
+ --data-binary "@$CONTEXT_FILE" \
+ --unix-socket /var/run/docker.sock "http:/build?t=$IMAGE_NAME:$HASH" \
+ | tee /tmp/docker-build.log \
+ | jq -r '.status + .progress, .stream[:-1], .error | select(. != null)'
+
+# Exit non-zero if there are error entries in the log
+if jq -se 'add | .error' /tmp/docker-build.log > /dev/null; then
+  raise_error "Image build failed: `jq -rse 'add | .error' /tmp/docker-build.log`";
+fi
+
+# Get image from docker daemon
+# This interacts directly with the docker remote API, see:
+# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.18/
+curl -s \
+ -X GET \
+ --unix-socket /var/run/docker.sock "http:/images/$IMAGE_NAME:$HASH/get" \
+ | zstd -3 -c -o /home/worker/workspace/artifacts/image.tar.zst
diff --git a/testing/docker/image_builder/setup.sh b/testing/docker/image_builder/setup.sh
new file mode 100644
index 000000000..1a2d13503
--- /dev/null
+++ b/testing/docker/image_builder/setup.sh
@@ -0,0 +1,53 @@
+#!/bin/bash -vex
+set -v -e -x
+
+export DEBIAN_FRONTEND=noninteractive
+
+# Update apt-get lists
+apt-get update -y
+
+# Install dependencies
+apt-get install -y \
+ curl \
+ tar \
+ jq \
+ python \
+ build-essential # Only needed for zstd installation, will be removed later
+
+# Install mercurial
+. /setup/common.sh
+. /setup/install-mercurial.sh
+
+# Install build-image.sh script
+chmod +x /usr/local/bin/build-image.sh
+chmod +x /usr/local/bin/run-task
+
+# Create workspace
+mkdir -p /home/worker/workspace
+
+# Install zstd 1.1.1
+cd /setup
+tooltool_fetch <<EOF
+[
+ {
+ "size": 734872,
+ "visibility": "public",
+ "digest": "a8817e74254f21ee5b76a21691e009ede2cdc70a78facfa453902df3e710e90e78d67f2229956d835960fd1085c33312ff273771b75f9322117d85eb35d8e695",
+ "algorithm": "sha512",
+ "filename": "zstd.tar.gz"
+ }
+]
+EOF
+cd -
+tar -xvf /setup/zstd.tar.gz -C /setup
+make -C /setup/zstd-1.1.1/programs install
+rm -rf /setup/zstd-1.1.1/ /setup/zstd.tar.gz
+apt-get purge -y build-essential
+
+# Purge apt-get caches to minimize image size
+apt-get auto-remove -y
+apt-get clean -y
+rm -rf /var/lib/apt/lists/
+
+# Remove this script
+rm -rf /setup/
diff --git a/testing/docker/lint/Dockerfile b/testing/docker/lint/Dockerfile
new file mode 100644
index 000000000..7e9b41214
--- /dev/null
+++ b/testing/docker/lint/Dockerfile
@@ -0,0 +1,36 @@
+FROM ubuntu:16.04
+MAINTAINER Andrew Halberstadt <ahalberstadt@mozilla.com>
+
+RUN useradd -d /home/worker -s /bin/bash -m worker
+WORKDIR /home/worker
+
+RUN mkdir /build
+# %include testing/docker/recipes/tooltool.py
+ADD topsrcdir/testing/docker/recipes/tooltool.py /build/tooltool.py
+
+# %include testing/mozharness/external_tools/robustcheckout.py
+ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
+
+# %include testing/docker/recipes/install-mercurial.sh
+ADD topsrcdir/testing/docker/recipes/install-mercurial.sh /build/install-mercurial.sh
+ADD system-setup.sh /tmp/system-setup.sh
+# %include tools/lint/flake8/flake8_requirements.txt
+ADD topsrcdir/tools/lint/flake8/flake8_requirements.txt /tmp/flake8_requirements.txt
+RUN bash /tmp/system-setup.sh
+
+# %include testing/docker/recipes/run-task
+ADD topsrcdir/testing/docker/recipes/run-task /home/worker/bin/run-task
+RUN chown -R worker:worker /home/worker/bin && chmod 755 /home/worker/bin/*
+
+# Set variables normally configured at login by the shell's parent process;
+# these are taken from the GNU su manual
+ENV HOME /home/worker
+ENV SHELL /bin/bash
+ENV USER worker
+ENV LOGNAME worker
+ENV HOSTNAME taskcluster-worker
+ENV LANG en_US.UTF-8
+ENV LC_ALL en_US.UTF-8
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/lint/system-setup.sh b/testing/docker/lint/system-setup.sh
new file mode 100644
index 000000000..5bf1a04dd
--- /dev/null
+++ b/testing/docker/lint/system-setup.sh
@@ -0,0 +1,69 @@
+#!/usr/bin/env bash
+# This allows ubuntu-desktop to be installed without human interaction
+export DEBIAN_FRONTEND=noninteractive
+
+set -ve
+
+test `whoami` == 'root'
+
+mkdir -p /setup
+cd /setup
+
+apt_packages=()
+apt_packages+=('curl')
+apt_packages+=('locales')
+apt_packages+=('python')
+apt_packages+=('python-pip')
+apt_packages+=('sudo')
+apt_packages+=('xz-utils')
+
+apt-get update
+apt-get install -y "${apt_packages[@]}"
+
+# Without this we get spurious "LC_ALL: cannot change locale (en_US.UTF-8)" errors,
+# and python scripts raise UnicodeEncodeError when trying to print unicode characters.
+locale-gen en_US.UTF-8
+dpkg-reconfigure locales
+
+tooltool_fetch() {
+ cat >manifest.tt
+ /build/tooltool.py fetch
+ rm manifest.tt
+}
+
+cd /build
+. install-mercurial.sh
+
+###
+# ESLint Setup
+###
+
+# install node
+
+# For future reference, things like this don't need to be uploaded to tooltool;
+# as long as we verify the hash, we can download them from the external net.
+cd /setup
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 8310316,
+ "digest": "95f4fa3d9b215348393dfac4a1c5eff72e9ef85dca38eb69cc8e6c1fe5aada0136c3b182dc04ed5c19fb69f0ac7df85d9c4045b9eb382fcb545b0ccacfece25b",
+ "algorithm": "sha512",
+ "filename": "node-v4.4.5-linux-x64.tar.xz"
+}
+]
+EOF
+tar -C /usr/local --strip-components 1 -xJ < node-*.tar.xz
+node -v # verify
+npm -v
+
+###
+# Flake8 Setup
+###
+
+cd /setup
+
+pip install --require-hashes -r /tmp/flake8_requirements.txt
+
+cd /
+rm -rf /setup
diff --git a/testing/docker/recipes/centos6-build-system-setup.sh b/testing/docker/recipes/centos6-build-system-setup.sh
new file mode 100644
index 000000000..bf1d2c78a
--- /dev/null
+++ b/testing/docker/recipes/centos6-build-system-setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+cd /setup
+
+. /setup/common.sh
+. /setup/install-mercurial.sh
+
+rm -rf /setup
diff --git a/testing/docker/recipes/common.sh b/testing/docker/recipes/common.sh
new file mode 100644
index 000000000..ca3fc6996
--- /dev/null
+++ b/testing/docker/recipes/common.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+tooltool_fetch() {
+ cat >manifest.tt
+ python /setup/tooltool.py fetch
+ rm manifest.tt
+}
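+# Usage: pipe a tooltool manifest on stdin, e.g.
+#   tooltool_fetch <<EOF
+#   [{"size": 1234, "digest": "...", "algorithm": "sha512", "filename": "f.tar.gz"}]
+#   EOF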
diff --git a/testing/docker/recipes/install-mercurial.sh b/testing/docker/recipes/install-mercurial.sh
new file mode 100644
index 000000000..6311a6f53
--- /dev/null
+++ b/testing/docker/recipes/install-mercurial.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script installs and configures Mercurial.
+
+set -e
+
+# Detect OS.
+if [ -f /etc/lsb-release ]; then
+ . /etc/lsb-release
+
+ if [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "16.04" ]; then
+ HG_DEB=1
+ HG_DIGEST=e891b46d8e97cb1c6b0c714e037ea78ae3043f49d27655332c615c861ebb94654a064298c7363d318edd7750c45574cc434848ae758adbcd2a41c6c390006053
+ HG_SIZE=159870
+ HG_FILENAME=mercurial_4.1.2_amd64.deb
+
+ HG_COMMON_DIGEST=112fab48805f267343c5757af5633ef51e4a8fcc7029b83afb7790ba9600ec185d4857dd1925c9aa724bc191f5f37039a59900b99f95e3427bf5d82c85447b69
+ HG_COMMON_SIZE=1919078
+ HG_COMMON_FILENAME=mercurial-common_4.1.2_all.deb
+ elif [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "12.04" ]; then
+ HG_DEB=1
+ HG_DIGEST=67823aa455c59dbdc24ec1f044b0afdb5c03520ef3601509cb5466dc0ac332846caf96176f07de501c568236f6909e55dfc8f4b02f8c69fa593a4abca9abfeb8
+ HG_SIZE=167880
+ HG_FILENAME=mercurial_4.1.2_amd64.deb
+
+ HG_COMMON_DIGEST=5e1c462a9b699d2068f7a0c14589f347ca719c216181ef7a625033df757185eeb3a8fed57986829a7943f16af5a8d66ddf457cc7fc4af557be88eb09486fe665
+ HG_COMMON_SIZE=3091596
+ HG_COMMON_FILENAME=mercurial-common_4.1.2_all.deb
+ fi
+
+ CERT_PATH=/etc/ssl/certs/ca-certificates.crt
+
+elif [ -f /etc/centos-release ]; then
+ CENTOS_VERSION=`rpm -q --queryformat '%{VERSION}' centos-release`
+ if [ "${CENTOS_VERSION}" = "6" ]; then
+ if [ -f /usr/bin/pip2.7 ]; then
+ PIP_PATH=/usr/bin/pip2.7
+ else
+ # The following RPM is "linked" against Python 2.6, which doesn't
+ # support TLS 1.2. Given the security implications of an insecure
+ # version control tool, we choose to prefer a Mercurial built using
+ # Python 2.7 that supports TLS 1.2. Before you uncomment the code
+ # below, think long and hard about the implications of limiting
+ # Mercurial to TLS 1.0.
+ #HG_RPM=1
+ #HG_DIGEST=c64e00c74402cd9c4ef9792177354fa6ff9c8103f41358f0eab2b15dba900d47d04ea582c6c6ebb80cf52495a28433987ffb67a5f39cd843b6638e3fa46921c8
+ #HG_SIZE=4437360
+ #HG_FILENAME=mercurial-4.1.2.x86_64.rpm
+ echo "We currently require Python 2.7 and /usr/bin/pip2.7 to run Mercurial"
+ exit 1
+ fi
+ else
+ echo "Unsupported CentOS version: ${CENTOS_VERSION}"
+ exit 1
+ fi
+
+ CERT_PATH=/etc/ssl/certs/ca-bundle.crt
+fi
+
+if [ -n "${HG_DEB}" ]; then
+tooltool_fetch <<EOF
+[
+{
+ "size": ${HG_SIZE},
+ "digest": "${HG_DIGEST}",
+ "algorithm": "sha512",
+ "filename": "${HG_FILENAME}"
+},
+{
+ "size": ${HG_COMMON_SIZE},
+ "digest": "${HG_COMMON_DIGEST}",
+ "algorithm": "sha512",
+ "filename": "${HG_COMMON_FILENAME}"
+}
+]
+EOF
+
+ dpkg -i ${HG_COMMON_FILENAME} ${HG_FILENAME}
+elif [ -n "${HG_RPM}" ]; then
+tooltool_fetch <<EOF
+[
+{
+ "size": ${HG_SIZE},
+ "digest": "${HG_DIGEST}",
+ "algorithm": "sha512",
+ "filename": "${HG_FILENAME}"
+}
+]
+EOF
+
+ rpm -i ${HG_FILENAME}
+elif [ -n "${PIP_PATH}" ]; then
+tooltool_fetch <<EOF
+[
+{
+"size": 5133417,
+"visibility": "public",
+"digest": "32b59d23d6b911b7a7e9c9c7659457daf2eba771d5170ad5a44a068d7941939e1d68c72c847e488bf26c14392e5d7ee25e5f660e0330250d0685acce40552745",
+"algorithm": "sha512",
+"filename": "mercurial-4.1.2.tar.gz"
+}
+]
+EOF
+
+ ${PIP_PATH} install mercurial-4.1.2.tar.gz
+else
+ echo "Do not know how to install Mercurial on this OS"
+ exit 1
+fi
+
+chmod 644 /usr/local/mercurial/robustcheckout.py
+
+mkdir -p /etc/mercurial
+cat >/etc/mercurial/hgrc <<EOF
+# By default the progress bar starts after 3s and updates every 0.1s. We
+# change this so it shows and updates every 1.0s.
+# We also tell progress to assume a TTY is present so updates are printed
+# even if there is no known TTY.
+[progress]
+delay = 1.0
+refresh = 1.0
+assume-tty = true
+
+[web]
+cacerts = ${CERT_PATH}
+
+[extensions]
+robustcheckout = /usr/local/mercurial/robustcheckout.py
+
+[hostsecurity]
+# When running a modern Python, Mercurial will default to TLS 1.1+.
+# When running on a legacy Python, Mercurial will default to TLS 1.0+.
+# There is no good reason we shouldn't be running a modern Python
+# capable of speaking TLS 1.2. And the only Mercurial servers we care
+# about should be running TLS 1.2. So make TLS 1.2 the minimum.
+minimumprotocol = tls1.2
+
+# Settings to make 1-click loaners more useful.
+[extensions]
+color =
+histedit =
+pager =
+rebase =
+
+[diff]
+git = 1
+showfunc = 1
+
+[pager]
+pager = LESS=FRSXQ less
+
+attend-help = true
+attend-incoming = true
+attend-log = true
+attend-outgoing = true
+attend-status = true
+EOF
+
+chmod 644 /etc/mercurial/hgrc
diff --git a/testing/docker/recipes/run-task b/testing/docker/recipes/run-task
new file mode 100755
index 000000000..978683cb5
--- /dev/null
+++ b/testing/docker/recipes/run-task
@@ -0,0 +1,324 @@
+#!/usr/bin/python2.7 -u
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Run a task after performing common actions.
+
+This script is meant to be the "driver" for TaskCluster based tasks.
+It receives some common arguments to control the run-time environment.
+
+It performs actions as requested from the arguments. Then it executes
+the requested process and prints its output, prefixing it with the
+current time to improve log usefulness.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import datetime
+import errno
+import grp
+import json
+import os
+import pwd
+import re
+import socket
+import stat
+import subprocess
+import sys
+import urllib2
+
+
+FINGERPRINT_URL = 'http://taskcluster/secrets/v1/secret/project/taskcluster/gecko/hgfingerprint'
+FALLBACK_FINGERPRINT = {
+ 'fingerprints':
+ "sha256:8e:ad:f7:6a:eb:44:06:15:ed:f3:e4:69:a6:64:60:37:2d:ff:98:88:37"
+ ":bf:d7:b8:40:84:01:48:9c:26:ce:d9"}
+
+
+def print_line(prefix, m):
+ now = datetime.datetime.utcnow()
+ print(b'[%s %sZ] %s' % (prefix, now.isoformat(), m), end=b'')
+
+
+def run_and_prefix_output(prefix, args, extra_env=None):
+ """Runs a process and prefixes its output with the time.
+
+ Returns the process exit code.
+ """
+ print_line(prefix, b'executing %s\n' % args)
+
+ env = dict(os.environ)
+ env.update(extra_env or {})
+
+ # Note: TaskCluster's stdin is a TTY. This attribute is lost
+ # when we pass sys.stdin to the invoked process. If we cared
+ # to preserve stdin as a TTY, we could make this work. But until
+ # someone needs it, don't bother.
+ p = subprocess.Popen(args,
+ # Disable buffering because we want to receive output
+ # as it is generated so timestamps in logs are
+ # accurate.
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ stdin=sys.stdin.fileno(),
+ cwd='/',
+ env=env,
+ # So \r in progress bars are rendered as multiple
+ # lines, preserving progress indicators.
+ universal_newlines=True)
+
+ while True:
+ data = p.stdout.readline()
+ if data == b'':
+ break
+
+ print_line(prefix, data)
+
+ return p.wait()
+
+
+def vcs_checkout(source_repo, dest, store_path,
+ base_repo=None, revision=None, branch=None):
+ # Specify method to checkout a revision. This defaults to revisions as
+ # SHA-1 strings, but also supports symbolic revisions like `tip` via the
+ # branch flag.
+ if revision:
+ revision_flag = b'--revision'
+ revision_value = revision
+ elif branch:
+ revision_flag = b'--branch'
+ revision_value = branch
+ else:
+ print('revision is not specified for checkout')
+ sys.exit(1)
+
+ # Obtain certificate fingerprints.
+ try:
+ print_line(b'vcs', 'fetching hg.mozilla.org fingerprint from %s\n' %
+ FINGERPRINT_URL)
+ res = urllib2.urlopen(FINGERPRINT_URL, timeout=10)
+ secret = res.read()
+ try:
+ secret = json.loads(secret, encoding='utf-8')
+ except ValueError:
+ print_line(b'vcs', 'invalid JSON in hg fingerprint secret')
+ sys.exit(1)
+ except (urllib2.URLError, socket.timeout):
+        print_line(b'vcs', 'Unable to retrieve current hg.mozilla.org fingerprint '
+                   'using the secret service, using fallback instead.')
+ # XXX This fingerprint will not be accurate if running on an old
+ # revision after the server fingerprint has changed.
+ secret = {'secret': FALLBACK_FINGERPRINT}
+
+ hgmo_fingerprint = secret['secret']['fingerprints'].encode('ascii')
+
+ args = [
+ b'/usr/bin/hg',
+ b'--config', b'hostsecurity.hg.mozilla.org:fingerprints=%s' % hgmo_fingerprint,
+ b'robustcheckout',
+ b'--sharebase', store_path,
+ b'--purge',
+ ]
+
+ if base_repo:
+ args.extend([b'--upstream', base_repo])
+
+ args.extend([
+ revision_flag, revision_value,
+ source_repo, dest,
+ ])
+
+ res = run_and_prefix_output(b'vcs', args,
+ extra_env={b'PYTHONUNBUFFERED': b'1'})
+ if res:
+ sys.exit(res)
+
+ # Update the current revision hash and ensure that it is well formed.
+ revision = subprocess.check_output(
+ [b'/usr/bin/hg', b'log',
+ b'--rev', b'.',
+ b'--template', b'{node}'],
+ cwd=dest)
+
+ assert re.match('^[a-f0-9]{40}$', revision)
+ return revision
+
+
+def main(args):
+ print_line(b'setup', b'run-task started\n')
+
+ if os.getuid() != 0:
+ print('assertion failed: not running as root')
+ return 1
+
+ # Arguments up to '--' are ours. After are for the main task
+ # to be executed.
+ try:
+ i = args.index('--')
+ our_args = args[0:i]
+ task_args = args[i + 1:]
+ except ValueError:
+ our_args = args
+ task_args = []
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--user', default='worker', help='user to run as')
+ parser.add_argument('--group', default='worker', help='group to run as')
+ # We allow paths to be chowned by the --user:--group before permissions are
+ # dropped. This is often necessary for caches/volumes, since they default
+ # to root:root ownership.
+ parser.add_argument('--chown', action='append',
+ help='Directory to chown to --user:--group')
+ parser.add_argument('--chown-recursive', action='append',
+ help='Directory to recursively chown to --user:--group')
+ parser.add_argument('--vcs-checkout',
+ help='Directory where Gecko checkout should be created')
+ parser.add_argument('--tools-checkout',
+ help='Directory where build/tools checkout should be created')
+
+ args = parser.parse_args(our_args)
+
+ try:
+ user = pwd.getpwnam(args.user)
+ except KeyError:
+ print('could not find user %s; specify --user to a known user' %
+ args.user)
+ return 1
+ try:
+ group = grp.getgrnam(args.group)
+ except KeyError:
+ print('could not find group %s; specify --group to a known group' %
+ args.group)
+ return 1
+
+ uid = user.pw_uid
+ gid = group.gr_gid
+
+    # Find all groups to which this user belongs.
+ gids = [g.gr_gid for g in grp.getgrall() if args.group in g.gr_mem]
+
+ wanted_dir_mode = stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
+
+ def set_dir_permissions(path, uid, gid):
+ st = os.lstat(path)
+
+ if st.st_uid != uid or st.st_gid != gid:
+ os.chown(path, uid, gid)
+
+ # Also make sure dirs are writable in case we need to delete
+ # them.
+ if st.st_mode & wanted_dir_mode != wanted_dir_mode:
+ os.chmod(path, st.st_mode | wanted_dir_mode)
+
+ # Change ownership of requested paths.
+ # FUTURE: parse argument values for user/group if we don't want to
+ # use --user/--group.
+ for path in args.chown or []:
+ print_line(b'chown', b'changing ownership of %s to %s:%s\n' % (
+ path, user.pw_name, group.gr_name))
+ set_dir_permissions(path, uid, gid)
+
+ for path in args.chown_recursive or []:
+ print_line(b'chown', b'recursively changing ownership of %s to %s:%s\n' %
+ (path, user.pw_name, group.gr_name))
+
+ set_dir_permissions(path, uid, gid)
+
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ set_dir_permissions(os.path.join(root, d), uid, gid)
+
+ for f in files:
+                # File may be a symlink that points to nowhere, in which case
+                # os.chown() would fail because it attempts to follow the
+ # symlink. We only care about directory entries, not what
+ # they point to. So setting the owner of the symlink should
+ # be sufficient.
+ os.lchown(os.path.join(root, f), uid, gid)
+
+ def prepare_checkout_dir(checkout):
+ if not checkout:
+ return
+
+ # Ensure the directory for the source checkout exists.
+ try:
+ os.makedirs(os.path.dirname(checkout))
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # And that it is owned by the appropriate user/group.
+ os.chown(os.path.dirname(checkout), uid, gid)
+
+ # And ensure the shared store path exists and has proper permissions.
+ if 'HG_STORE_PATH' not in os.environ:
+ print('error: HG_STORE_PATH environment variable not set')
+ sys.exit(1)
+
+ store_path = os.environ['HG_STORE_PATH']
+ try:
+ os.makedirs(store_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ os.chown(store_path, uid, gid)
+
+ prepare_checkout_dir(args.vcs_checkout)
+ prepare_checkout_dir(args.tools_checkout)
+
+ # Drop permissions to requested user.
+ # This code is modeled after what `sudo` was observed to do in a Docker
+ # container. We do not bother calling setrlimit() because containers have
+ # their own limits.
+ print_line(b'setup', b'running as %s:%s\n' % (args.user, args.group))
+ os.setgroups(gids)
+ os.umask(022)
+ os.setresgid(gid, gid, gid)
+ os.setresuid(uid, uid, uid)
+
+ # Checkout the repository, setting the GECKO_HEAD_REV to the current
+ # revision hash. Revision hashes have priority over symbolic revisions. We
+ # disallow running tasks with symbolic revisions unless they have been
+ # resolved by a checkout.
+ if args.vcs_checkout:
+ base_repo = os.environ.get('GECKO_BASE_REPOSITORY')
+ # Some callers set the base repository to mozilla-central for historical
+ # reasons. Switch to mozilla-unified because robustcheckout works best
+ # with it.
+ if base_repo == 'https://hg.mozilla.org/mozilla-central':
+ base_repo = b'https://hg.mozilla.org/mozilla-unified'
+
+ os.environ['GECKO_HEAD_REV'] = vcs_checkout(
+ os.environ['GECKO_HEAD_REPOSITORY'],
+ args.vcs_checkout,
+ os.environ['HG_STORE_PATH'],
+ base_repo=base_repo,
+ revision=os.environ.get('GECKO_HEAD_REV'),
+ branch=os.environ.get('GECKO_HEAD_REF'))
+
+ elif not os.environ.get('GECKO_HEAD_REV') and \
+ os.environ.get('GECKO_HEAD_REF'):
+ print('task should be defined in terms of non-symbolic revision')
+ return 1
+
+ if args.tools_checkout:
+ vcs_checkout(b'https://hg.mozilla.org/build/tools',
+ args.tools_checkout,
+ os.environ['HG_STORE_PATH'],
+ # Always check out the latest commit on default branch.
+ # This is non-deterministic!
+ branch=b'default')
+
+ return run_and_prefix_output(b'task', task_args)
+
+
+if __name__ == '__main__':
+ # Unbuffer stdio.
+ sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
+ sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
+
+ sys.exit(main(sys.argv[1:]))
diff --git a/testing/docker/recipes/tooltool.py b/testing/docker/recipes/tooltool.py
new file mode 100755
index 000000000..952f9a5a7
--- /dev/null
+++ b/testing/docker/recipes/tooltool.py
@@ -0,0 +1,1022 @@
+#!/usr/bin/env python
+
+# tooltool is a lookaside cache implemented in Python
+# Copyright (C) 2011 John H. Ford <john@johnford.info>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation version 2
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# A manifest file specifies files in that directory that are stored
+# elsewhere. This file should only list files in the same directory
+# in which the manifest file resides and it should be called
+# 'manifest.tt'
+
+import hashlib
+import httplib
+import json
+import logging
+import optparse
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import threading
+import time
+import urllib2
+import urlparse
+import zipfile
+
+from subprocess import PIPE
+from subprocess import Popen
+
+__version__ = '1'
+
+DEFAULT_MANIFEST_NAME = 'manifest.tt'
+TOOLTOOL_PACKAGE_SUFFIX = '.TOOLTOOL-PACKAGE'
+
+
+log = logging.getLogger(__name__)
+
+
+class FileRecordJSONEncoderException(Exception):
+ pass
+
+
+class InvalidManifest(Exception):
+ pass
+
+
+class ExceptionWithFilename(Exception):
+
+ def __init__(self, filename):
+ Exception.__init__(self)
+ self.filename = filename
+
+
+class BadFilenameException(ExceptionWithFilename):
+ pass
+
+
+class DigestMismatchException(ExceptionWithFilename):
+ pass
+
+
+class MissingFileException(ExceptionWithFilename):
+ pass
+
+
+class FileRecord(object):
+
+ def __init__(self, filename, size, digest, algorithm, unpack=False,
+ visibility=None, setup=None):
+ object.__init__(self)
+ if '/' in filename or '\\' in filename:
+ log.error(
+ "The filename provided contains path information and is, therefore, invalid.")
+ raise BadFilenameException(filename=filename)
+ self.filename = filename
+ self.size = size
+ self.digest = digest
+ self.algorithm = algorithm
+ self.unpack = unpack
+ self.visibility = visibility
+ self.setup = setup
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if self.filename == other.filename and \
+ self.size == other.size and \
+ self.digest == other.digest and \
+ self.algorithm == other.algorithm and \
+ self.visibility == other.visibility:
+ return True
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return "%s.%s(filename='%s', size=%s, digest='%s', algorithm='%s', visibility=%r)" % (
+ __name__, self.__class__.__name__, self.filename, self.size,
+ self.digest, self.algorithm, self.visibility)
+
+ def present(self):
+ # Doesn't check validity
+ return os.path.exists(self.filename)
+
+ def validate_size(self):
+ if self.present():
+ return self.size == os.path.getsize(self.filename)
+ else:
+ log.debug(
+ "trying to validate size on a missing file, %s", self.filename)
+ raise MissingFileException(filename=self.filename)
+
+ def validate_digest(self):
+ if self.present():
+ with open(self.filename, 'rb') as f:
+ return self.digest == digest_file(f, self.algorithm)
+ else:
+ log.debug(
+ "trying to validate digest on a missing file, %s", self.filename)
+ raise MissingFileException(filename=self.filename)
+
+ def validate(self):
+ if self.validate_size():
+ if self.validate_digest():
+ return True
+ return False
+
+ def describe(self):
+ if self.present() and self.validate():
+ return "'%s' is present and valid" % self.filename
+ elif self.present():
+ return "'%s' is present and invalid" % self.filename
+ else:
+ return "'%s' is absent" % self.filename
+
+
+def create_file_record(filename, algorithm):
+ fo = open(filename, 'rb')
+ stored_filename = os.path.split(filename)[1]
+ fr = FileRecord(stored_filename, os.path.getsize(
+ filename), digest_file(fo, algorithm), algorithm)
+ fo.close()
+ return fr
+
+
+class FileRecordJSONEncoder(json.JSONEncoder):
+
+ def encode_file_record(self, obj):
+ if not issubclass(type(obj), FileRecord):
+ err = "FileRecordJSONEncoder is only for FileRecord and lists of FileRecords, " \
+ "not %s" % obj.__class__.__name__
+ log.warn(err)
+ raise FileRecordJSONEncoderException(err)
+ else:
+ rv = {
+ 'filename': obj.filename,
+ 'size': obj.size,
+ 'algorithm': obj.algorithm,
+ 'digest': obj.digest,
+ }
+ if obj.unpack:
+ rv['unpack'] = True
+ if obj.visibility is not None:
+ rv['visibility'] = obj.visibility
+ if obj.setup:
+ rv['setup'] = obj.setup
+ return rv
+
+ def default(self, f):
+ if issubclass(type(f), list):
+ record_list = []
+ for i in f:
+ record_list.append(self.encode_file_record(i))
+ return record_list
+ else:
+ return self.encode_file_record(f)
+
+
+class FileRecordJSONDecoder(json.JSONDecoder):
+
+ """I help the json module materialize a FileRecord from
+ a JSON file. I understand FileRecords and lists of
+ FileRecords. I ignore things that I don't expect for now"""
+ # TODO: make this more explicit in what it's looking for
+ # and error out on unexpected things
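+ # A sketch of typical use (field values are illustrative):
+ # json.loads('[{"filename": "f.txt", "size": 1, "algorithm": "sha512",
+ # "digest": "..."}]', cls=FileRecordJSONDecoder)
+ # returns a list containing a single FileRecord.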
+
+ def process_file_records(self, obj):
+ if isinstance(obj, list):
+ record_list = []
+ for i in obj:
+ record = self.process_file_records(i)
+ if issubclass(type(record), FileRecord):
+ record_list.append(record)
+ return record_list
+ required_fields = [
+ 'filename',
+ 'size',
+ 'algorithm',
+ 'digest',
+ ]
+ if isinstance(obj, dict):
+ missing = False
+ for req in required_fields:
+ if req not in obj:
+ missing = True
+ break
+
+ if not missing:
+ unpack = obj.get('unpack', False)
+ visibility = obj.get('visibility', None)
+ setup = obj.get('setup')
+ rv = FileRecord(
+ obj['filename'], obj['size'], obj['digest'], obj['algorithm'],
+ unpack, visibility, setup)
+ log.debug("materialized %s" % rv)
+ return rv
+ return obj
+
+ def decode(self, s):
+ decoded = json.JSONDecoder.decode(self, s)
+ rv = self.process_file_records(decoded)
+ return rv
+
+
+class Manifest(object):
+
+ valid_formats = ('json',)
+
+ def __init__(self, file_records=None):
+ self.file_records = file_records or []
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if len(self.file_records) != len(other.file_records):
+ log.debug('Manifests differ in number of files')
+ return False
+ # sort the file records by filename before comparing
+ mine = sorted((fr.filename, fr) for fr in self.file_records)
+ theirs = sorted((fr.filename, fr) for fr in other.file_records)
+ return mine == theirs
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __deepcopy__(self, memo):
+ # This is required for a deep copy
+ return Manifest(self.file_records[:])
+
+ def __copy__(self):
+ return Manifest(self.file_records)
+
+ def copy(self):
+ return Manifest(self.file_records[:])
+
+ def present(self):
+ return all(i.present() for i in self.file_records)
+
+ def validate_sizes(self):
+ return all(i.validate_size() for i in self.file_records)
+
+ def validate_digests(self):
+ return all(i.validate_digest() for i in self.file_records)
+
+ def validate(self):
+ return all(i.validate() for i in self.file_records)
+
+ def load(self, data_file, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ try:
+ self.file_records.extend(
+ json.load(data_file, cls=FileRecordJSONDecoder))
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def loads(self, data_string, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ try:
+ self.file_records.extend(
+ json.loads(data_string, cls=FileRecordJSONDecoder))
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def dump(self, output_file, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ rv = json.dump(
+ self.file_records, output_file, indent=0, cls=FileRecordJSONEncoder,
+ separators=(',', ': '))
+ print >> output_file, ''
+ return rv
+
+ def dumps(self, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ return json.dumps(self.file_records, cls=FileRecordJSONEncoder)
+
+
+def digest_file(f, a):
+ """I take a file-like object 'f' and return a hex string containing
+ the result of the algorithm 'a' applied to 'f'."""
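+ # Illustrative use: digest_file(open('f.txt', 'rb'), 'sha512') returns
+ # the sha512 hex digest of f.txt ('f.txt' is a hypothetical file).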
+ h = hashlib.new(a)
+ chunk_size = 1024 * 10
+ data = f.read(chunk_size)
+ while data:
+ h.update(data)
+ data = f.read(chunk_size)
+ name = repr(f.name) if hasattr(f, 'name') else 'a file'
+ log.debug('hashed %s with %s to be %s', name, a, h.hexdigest())
+ return h.hexdigest()
+
+
+def execute(cmd):
+ """Execute CMD, logging its stdout at the info level"""
+ process = Popen(cmd, shell=True, stdout=PIPE)
+ while True:
+ line = process.stdout.readline()
+ if not line:
+ break
+ log.info(line.replace('\n', ' '))
+ return process.wait() == 0
+
+
+def open_manifest(manifest_file):
+ """I know how to take a filename and load it into a Manifest object"""
+ if os.path.exists(manifest_file):
+ manifest = Manifest()
+ with open(manifest_file, "rb") as f:
+ manifest.load(f)
+ log.debug("loaded manifest from file '%s'" % manifest_file)
+ return manifest
+ else:
+ log.debug("tried to load absent file '%s' as manifest" % manifest_file)
+ raise InvalidManifest(
+ "manifest file '%s' does not exist" % manifest_file)
+
+
+def list_manifest(manifest_file):
+ """I know how to print all the files listed in a manifest"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error("failed to load manifest file at '%s': %s" % (
+ manifest_file,
+ str(e),
+ ))
+ return False
+ for f in manifest.file_records:
+ print "%s\t%s\t%s" % ("P" if f.present() else "-",
+ "V" if f.present() and f.validate() else "-",
+ f.filename)
+ return True
+
+
+def validate_manifest(manifest_file):
+ """I validate that all files in a manifest are present and valid but
+ don't fetch or delete them if they aren't"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error("failed to load manifest file at '%s': %s" % (
+ manifest_file,
+ str(e),
+ ))
+ return False
+ invalid_files = []
+ absent_files = []
+ for f in manifest.file_records:
+ if not f.present():
+ absent_files.append(f)
+ else:
+ if not f.validate():
+ invalid_files.append(f)
+ if len(invalid_files + absent_files) == 0:
+ return True
+ else:
+ return False
+
+
+def add_files(manifest_file, algorithm, filenames, visibility, unpack):
+ # Returns True if all files were added successfully, False otherwise.
+ # Library exceptions are not caught. If any file is already tracked in
+ # the manifest, the return value is False because that file was not
+ # added.
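+ # Illustrative call (filenames are hypothetical):
+ # add_files('manifest.tt', 'sha512', ['clang.tar.xz'], 'public', True)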
+ all_files_added = True
+ # Create a old_manifest object to add to
+ if os.path.exists(manifest_file):
+ old_manifest = open_manifest(manifest_file)
+ else:
+ old_manifest = Manifest()
+ log.debug("creating a new manifest file")
+ new_manifest = Manifest() # use a different manifest for the output
+ for filename in filenames:
+ log.debug("adding %s" % filename)
+ path, name = os.path.split(filename)
+ new_fr = create_file_record(filename, algorithm)
+ new_fr.visibility = visibility
+ new_fr.unpack = unpack
+ log.debug("appending a new file record to manifest file")
+ add = True
+ for fr in old_manifest.file_records:
+ log.debug("manifest file has '%s'" % "', '".join(
+ [x.filename for x in old_manifest.file_records]))
+ if new_fr == fr:
+ log.info("file already in old_manifest")
+ add = False
+ elif filename == fr.filename:
+ log.error("manifest already contains a different file named %s" % filename)
+ add = False
+ if add:
+ new_manifest.file_records.append(new_fr)
+ log.debug("added '%s' to manifest" % filename)
+ else:
+ all_files_added = False
+ # copy any files in the old manifest that aren't in the new one
+ new_filenames = set(fr.filename for fr in new_manifest.file_records)
+ for old_fr in old_manifest.file_records:
+ if old_fr.filename not in new_filenames:
+ new_manifest.file_records.append(old_fr)
+ with open(manifest_file, 'wb') as output:
+ new_manifest.dump(output, fmt='json')
+ return all_files_added
+
+
+def touch(f):
+ """Used to modify mtime in cached files;
+ mtime is used by the purge command"""
+ try:
+ os.utime(f, None)
+ except OSError:
+ log.warn('failed to update mtime of file %s' % f)
+
+
+def fetch_file(base_urls, file_record, grabchunk=1024 * 4, auth_file=None, region=None):
+ # A file which is requested to be fetched and already exists locally
+ # will be overwritten by this function
+ fd, temp_path = tempfile.mkstemp(dir=os.getcwd())
+ os.close(fd)
+ fetched_path = None
+ for base_url in base_urls:
+ # Generate the URL for the file on the server side
+ url = urlparse.urljoin(base_url,
+ '%s/%s' % (file_record.algorithm, file_record.digest))
+ if region is not None:
+ url += '?region=' + region
+
+ log.info("Attempting to fetch from '%s'..." % base_url)
+
+ # Well, the file doesn't exist locally. Let's fetch it.
+ try:
+ req = urllib2.Request(url)
+ _authorize(req, auth_file)
+ f = urllib2.urlopen(req)
+ log.debug("opened %s for reading" % url)
+ with open(temp_path, 'wb') as out:
+ k = True
+ size = 0
+ while k:
+ # TODO: print statistics as file transfers happen both for info and to stop
+ # buildbot timeouts
+ indata = f.read(grabchunk)
+ out.write(indata)
+ size += len(indata)
+ if indata == '':
+ k = False
+ log.info("File %s fetched from %s as %s" %
+ (file_record.filename, base_url, temp_path))
+ fetched_path = temp_path
+ break
+ except (urllib2.URLError, urllib2.HTTPError, ValueError) as e:
+ log.info("...failed to fetch '%s' from %s" %
+ (file_record.filename, base_url))
+ log.debug("%s" % e)
+ except IOError: # pragma: no cover
+ log.info("failed to write to temporary file for '%s'" %
+ file_record.filename, exc_info=True)
+
+ # cleanup temp file in case of issues
+ if fetched_path:
+ return os.path.split(fetched_path)[1]
+ else:
+ try:
+ os.remove(temp_path)
+ except OSError: # pragma: no cover
+ pass
+ return None
+
+
+def clean_path(dirname):
+ """Remove a subtree if it exists. Helper for unpack_file()."""
+ if os.path.exists(dirname):
+ log.info('rm tree: %s' % dirname)
+ shutil.rmtree(dirname)
+
+
+def unpack_file(filename, setup=None):
+ """Untar `filename` (which may be uncompressed or compressed with
+ gzip, bzip2, or xz), or unzip it if it is a zip file. The archive is
+ assumed to contain a single directory with a name matching the base
+ of the given filename. Xz support is handled by shelling out to 'tar'."""
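+ # Illustrative: unpack_file('clang.tar.xz') extracts into ./clang;
+ # with setup='setup.sh' it would then run ./clang/setup.sh.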
+ if tarfile.is_tarfile(filename):
+ tar_file, zip_ext = os.path.splitext(filename)
+ base_file, tar_ext = os.path.splitext(tar_file)
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ tar = tarfile.open(filename)
+ tar.extractall()
+ tar.close()
+ elif filename.endswith('.tar.xz'):
+ base_file = filename.replace('.tar.xz', '')
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ if not execute('tar -Jxf %s 2>&1' % filename):
+ return False
+ elif zipfile.is_zipfile(filename):
+ base_file = filename.replace('.zip', '')
+ clean_path(base_file)
+ log.info('unzipping "%s"' % filename)
+ z = zipfile.ZipFile(filename)
+ z.extractall()
+ z.close()
+ else:
+ log.error("Unknown archive extension for filename '%s'" % filename)
+ return False
+
+ if setup and not execute(os.path.join(base_file, setup)):
+ return False
+ return True
+
+
+def fetch_files(manifest_file, base_urls, filenames=[], cache_folder=None,
+ auth_file=None, region=None):
+ # Let's load the manifest file
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error("failed to load manifest file at '%s': %s" % (
+ manifest_file,
+ str(e),
+ ))
+ return False
+
+ # we want to track files that are already present AND valid in the
+ # current working directory; we will not need to fetch these
+ present_files = []
+
+ # We want to track files that fail to be fetched as well as
+ # files that are fetched
+ failed_files = []
+ fetched_files = []
+
+ # Files that we want to unpack.
+ unpack_files = []
+
+ # Setup for unpacked files.
+ setup_files = {}
+
+ # Let's go through the manifest and fetch the files that we want
+ for f in manifest.file_records:
+ # case 1: files are already present
+ if f.present():
+ if f.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # we have an invalid file here, better to cleanup!
+ # this invalid file needs to be replaced with a good one
+ # from the local cache or fetched from a tooltool server
+ log.info("File %s is present locally but it is invalid, so I will remove it "
+ "and try to fetch it" % f.filename)
+ os.remove(os.path.join(os.getcwd(), f.filename))
+
+ # check if file is already in cache
+ if cache_folder and f.filename not in present_files:
+ try:
+ shutil.copy(os.path.join(cache_folder, f.digest),
+ os.path.join(os.getcwd(), f.filename))
+ log.info("File %s retrieved from local cache %s" %
+ (f.filename, cache_folder))
+ touch(os.path.join(cache_folder, f.digest))
+
+ filerecord_for_validation = FileRecord(
+ f.filename, f.size, f.digest, f.algorithm)
+ if filerecord_for_validation.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # the file copied from the cache is invalid, better to
+ # clean up the cache version itself as well
+ log.warn("File %s retrieved from cache is invalid! I am deleting it from the "
+ "cache as well" % f.filename)
+ os.remove(os.path.join(os.getcwd(), f.filename))
+ os.remove(os.path.join(cache_folder, f.digest))
+ except IOError:
+ log.info("File %s not present in local cache folder %s" %
+ (f.filename, cache_folder))
+
+ # now I will try to fetch all files which are not already present and
+ # valid, appending a suffix to avoid race conditions
+ temp_file_name = None
+ # 'filenames' is the list of files to be managed; when non-empty it
+ # acts as a filter. If f.filename is in present_files, the file is
+ # already available, either from the working dir or from the cache.
+ if (f.filename in filenames or len(filenames) == 0) and f.filename not in present_files:
+ log.debug("fetching %s" % f.filename)
+ temp_file_name = fetch_file(base_urls, f, auth_file=auth_file, region=region)
+ if temp_file_name:
+ fetched_files.append((f, temp_file_name))
+ else:
+ failed_files.append(f.filename)
+ else:
+ log.debug("skipping %s" % f.filename)
+
+ if f.setup:
+ if f.unpack:
+ setup_files[f.filename] = f.setup
+ else:
+ log.error("'setup' requires 'unpack' being set for %s" % f.filename)
+ failed_files.append(f.filename)
+
+ # let's ensure that fetched files match what the manifest specified
+ for localfile, temp_file_name in fetched_files:
+ # since I downloaded to a temp file, I need to perform all validations on the temp file
+ # this is why filerecord_for_validation is created
+
+ filerecord_for_validation = FileRecord(
+ temp_file_name, localfile.size, localfile.digest, localfile.algorithm)
+
+ if filerecord_for_validation.validate():
+ # great!
+ # I can rename the temp file
+ log.info("File integrity verified, renaming %s to %s" %
+ (temp_file_name, localfile.filename))
+ os.rename(os.path.join(os.getcwd(), temp_file_name),
+ os.path.join(os.getcwd(), localfile.filename))
+
+ if localfile.unpack:
+ unpack_files.append(localfile.filename)
+
+ # if I am using a cache and a new file has just been retrieved from a
+ # remote location, I need to update the cache as well
+ if cache_folder:
+ log.info("Updating local cache %s..." % cache_folder)
+ try:
+ if not os.path.exists(cache_folder):
+ log.info("Creating cache in %s..." % cache_folder)
+ os.makedirs(cache_folder, 0700)
+ shutil.copy(os.path.join(os.getcwd(), localfile.filename),
+ os.path.join(cache_folder, localfile.digest))
+ log.info("Local cache %s updated with %s" % (cache_folder,
+ localfile.filename))
+ touch(os.path.join(cache_folder, localfile.digest))
+ except (OSError, IOError):
+ log.warning('Failed to add file %s to cache folder %s' %
+ (localfile.filename, cache_folder), exc_info=True)
+ else:
+ failed_files.append(localfile.filename)
+ log.error("'%s'" % filerecord_for_validation.describe())
+ os.remove(temp_file_name)
+
+ # Unpack files that need to be unpacked.
+ for filename in unpack_files:
+ if not unpack_file(filename, setup_files.get(filename)):
+ failed_files.append(filename)
+
+ # If we failed to fetch or validate a file, we need to fail
+ if len(failed_files) > 0:
+ log.error("The following files failed: '%s'" %
+ "', '".join(failed_files))
+ return False
+ return True
+
+
+def freespace(p):
+ "Returns the number of bytes free under directory `p`"
+ if sys.platform == 'win32': # pragma: no cover
+ # os.statvfs doesn't work on Windows
+ import win32file
+
+ secsPerClus, bytesPerSec, nFreeClus, totClus = win32file.GetDiskFreeSpace(
+ p)
+ return secsPerClus * bytesPerSec * nFreeClus
+ else:
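+ # Note: f_frsize * f_bavail is the space available to unprivileged
+ # processes, which can be less than the total free space (f_bfree).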
+ r = os.statvfs(p)
+ return r.f_frsize * r.f_bavail
+
+
+def purge(folder, gigs):
+ """If gigs is non-zero, delete files in `folder`, oldest first, until
+ `gigs` GB are free; if gigs is 0, perform a full purge. Files in
+ subfolders are not deleted recursively."""
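+ # Illustrative: purge('/home/worker/cache', 2) deletes oldest files
+ # until 2 GB are free; purge('/home/worker/cache', 0) removes all files
+ # (the path is hypothetical).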
+
+ full_purge = bool(gigs == 0)
+ gigs *= 1024 * 1024 * 1024
+
+ if not full_purge and freespace(folder) >= gigs:
+ log.info("No need to cleanup")
+ return
+
+ files = []
+ for f in os.listdir(folder):
+ p = os.path.join(folder, f)
+ # it deletes files in folder without going into subfolders,
+ # assuming the cache has a flat structure
+ if not os.path.isfile(p):
+ continue
+ mtime = os.path.getmtime(p)
+ files.append((mtime, p))
+
+ # iterate files sorted by mtime
+ for _, f in sorted(files):
+ log.info("removing %s to free up space" % f)
+ try:
+ os.remove(f)
+ except OSError:
+ log.info("Failed to remove %s" % f, exc_info=True)
+ if not full_purge and freespace(folder) >= gigs:
+ break
+
+
+def _log_api_error(e):
+ if hasattr(e, 'hdrs') and e.hdrs['content-type'] == 'application/json':
+ json_resp = json.load(e.fp)
+ log.error("%s: %s" % (json_resp['error']['name'],
+ json_resp['error']['description']))
+ else:
+ log.exception("Error making RelengAPI request:")
+
+
+def _authorize(req, auth_file):
+ if auth_file:
+ log.debug("using bearer token in %s" % auth_file)
+ req.add_unredirected_header('Authorization',
+ 'Bearer %s' % (open(auth_file, "rb").read().strip()))
+
+
+def _send_batch(base_url, auth_file, batch, region):
+ url = urlparse.urljoin(base_url, 'upload')
+ if region is not None:
+ url += "?region=" + region
+ req = urllib2.Request(url, json.dumps(batch), {'Content-Type': 'application/json'})
+ _authorize(req, auth_file)
+ try:
+ resp = urllib2.urlopen(req)
+ except (urllib2.URLError, urllib2.HTTPError) as e:
+ _log_api_error(e)
+ return None
+ return json.load(resp)['result']
+
+
+def _s3_upload(filename, file):
+ # urllib2 does not support streaming, so we fall back to good old httplib
+ url = urlparse.urlparse(file['put_url'])
+ cls = httplib.HTTPSConnection if url.scheme == 'https' else httplib.HTTPConnection
+ host, port = url.netloc.split(':') if ':' in url.netloc else (url.netloc, 443)
+ port = int(port)
+ conn = cls(host, port)
+ try:
+ req_path = "%s?%s" % (url.path, url.query) if url.query else url.path
+ conn.request('PUT', req_path, open(filename, "rb"),
+ {'Content-type': 'application/octet-stream'})
+ resp = conn.getresponse()
+ resp_body = resp.read()
+ conn.close()
+ if resp.status != 200:
+ raise RuntimeError("Non-200 return from AWS: %s %s\n%s" %
+ (resp.status, resp.reason, resp_body))
+ except Exception:
+ file['upload_exception'] = sys.exc_info()
+ file['upload_ok'] = False
+ else:
+ file['upload_ok'] = True
+
+
+def _notify_upload_complete(base_url, auth_file, file):
+ req = urllib2.Request(
+ urlparse.urljoin(
+ base_url,
+ 'upload/complete/%(algorithm)s/%(digest)s' % file))
+ _authorize(req, auth_file)
+ try:
+ urllib2.urlopen(req)
+ except urllib2.HTTPError as e:
+ if e.code != 409:
+ _log_api_error(e)
+ return
+ # 409 indicates that the upload URL hasn't expired yet and we
+ # should retry after a delay
+ to_wait = int(e.headers.get('X-Retry-After', 60))
+ log.warning("Waiting %d seconds for upload URLs to expire" % to_wait)
+ time.sleep(to_wait)
+ _notify_upload_complete(base_url, auth_file, file)
+ except Exception:
+ log.exception("While notifying server of upload completion:")
+
+
+def upload(manifest, message, base_urls, auth_file, region):
+ try:
+ manifest = open_manifest(manifest)
+ except InvalidManifest:
+ log.exception("failed to load manifest file at '%s'" % manifest)
+ return False
+
+ # verify the manifest, since we'll need the files present to upload
+ if not manifest.validate():
+ log.error('manifest is invalid')
+ return False
+
+ if any(fr.visibility is None for fr in manifest.file_records):
+ log.error('All files in a manifest for upload must have a visibility set')
+ return False
+
+ # convert the manifest to an upload batch
+ batch = {
+ 'message': message,
+ 'files': {},
+ }
+ for fr in manifest.file_records:
+ batch['files'][fr.filename] = {
+ 'size': fr.size,
+ 'digest': fr.digest,
+ 'algorithm': fr.algorithm,
+ 'visibility': fr.visibility,
+ }
+
+ # make the upload request
+ resp = _send_batch(base_urls[0], auth_file, batch, region)
+ if not resp:
+ return None
+ files = resp['files']
+
+ # Upload the files, each in a thread. This allows us to start all of the
+ # uploads before any of the URLs expire.
+ threads = {}
+ for filename, file in files.iteritems():
+ if 'put_url' in file:
+ log.info("%s: starting upload" % (filename,))
+ thd = threading.Thread(target=_s3_upload,
+ args=(filename, file))
+ thd.daemon = True
+ thd.start()
+ threads[filename] = thd
+ else:
+ log.info("%s: already exists on server" % (filename,))
+
+ # re-join all of those threads as they exit
+ success = True
+ while threads:
+ for filename, thread in threads.items():
+ if not thread.is_alive():
+ # _s3_upload has annotated file with result information
+ file = files[filename]
+ thread.join()
+ if file['upload_ok']:
+ log.info("%s: uploaded" % filename)
+ else:
+ log.error("%s: failed" % filename,
+ exc_info=file['upload_exception'])
+ success = False
+ del threads[filename]
+
+ # notify the server that the uploads are completed. If the notification
+ # fails, we don't consider that an error (the server will notice
+ # eventually)
+ for filename, file in files.iteritems():
+ if 'put_url' in file and file['upload_ok']:
+ log.info("notifying server of upload completion for %s" % (filename,))
+ _notify_upload_complete(base_urls[0], auth_file, file)
+
+ return success
+
+
+def process_command(options, args):
+ """ I know how to take a list of program arguments and
+ start doing the right thing with them"""
+ cmd = args[0]
+ cmd_args = args[1:]
+ log.debug("processing '%s' command with args '%s'" %
+ (cmd, '", "'.join(cmd_args)))
+ log.debug("using options: %s" % options)
+
+ if cmd == 'list':
+ return list_manifest(options['manifest'])
+ elif cmd == 'validate':
+ return validate_manifest(options['manifest'])
+ elif cmd == 'add':
+ return add_files(options['manifest'], options['algorithm'], cmd_args,
+ options['visibility'], options['unpack'])
+ elif cmd == 'purge':
+ if options['cache_folder']:
+ purge(folder=options['cache_folder'], gigs=options['size'])
+ return True
+ else:
+ log.critical('please specify the cache folder to be purged')
+ return False
+ elif cmd == 'fetch':
+ return fetch_files(
+ options['manifest'],
+ options['base_url'],
+ cmd_args,
+ cache_folder=options['cache_folder'],
+ auth_file=options.get("auth_file"),
+ region=options.get('region'))
+ elif cmd == 'upload':
+ if not options.get('message'):
+ log.critical('upload command requires a message')
+ return False
+ return upload(
+ options.get('manifest'),
+ options.get('message'),
+ options.get('base_url'),
+ options.get('auth_file'),
+ options.get('region'))
+ else:
+ log.critical('command "%s" is not implemented' % cmd)
+ return False
+
+
+def main(argv, _skip_logging=False):
+ # Set up option parsing
+ parser = optparse.OptionParser()
+ parser.add_option('-q', '--quiet', default=logging.INFO,
+ dest='loglevel', action='store_const', const=logging.ERROR)
+ parser.add_option('-v', '--verbose',
+ dest='loglevel', action='store_const', const=logging.DEBUG)
+ parser.add_option('-m', '--manifest', default=DEFAULT_MANIFEST_NAME,
+ dest='manifest', action='store',
+ help='specify the manifest file to be operated on')
+ parser.add_option('-d', '--algorithm', default='sha512',
+ dest='algorithm', action='store',
+ help='hashing algorithm to use (only sha512 is allowed)')
+ parser.add_option('--visibility', default=None,
+ dest='visibility', choices=['internal', 'public'],
+ help='Visibility level of this file; "internal" is for '
+ 'files that cannot be distributed out of the company '
+ 'but not for secrets; "public" files are available to '
+ 'anyone without restriction')
+ parser.add_option('--unpack', default=False,
+ dest='unpack', action='store_true',
+ help='Request unpacking this file after fetch.'
+ ' This is helpful with tarballs.')
+ parser.add_option('-o', '--overwrite', default=False,
+ dest='overwrite', action='store_true',
+ help='UNUSED; present for backward compatibility')
+ parser.add_option('--url', dest='base_url', action='append',
+ help='RelengAPI URL ending with /tooltool/; default '
+ 'is appropriate for Mozilla')
+ parser.add_option('-c', '--cache-folder', dest='cache_folder',
+ help='Local cache folder')
+ parser.add_option('-s', '--size',
+ help='free space required (in GB)', dest='size',
+ type='float', default=0.)
+ parser.add_option('-r', '--region', help='Preferred AWS region for upload or fetch; '
+ 'example: --region=us-west-2')
+ parser.add_option('--message',
+ help='The "commit message" for an upload; format with a bug number '
+ 'and brief comment',
+ dest='message')
+ parser.add_option('--authentication-file',
+ help='Use the RelengAPI token found in the given file to '
+ 'authenticate to the RelengAPI server.',
+ dest='auth_file')
+
+ (options_obj, args) = parser.parse_args(argv[1:])
+
+ # default the options list if not provided
+ if not options_obj.base_url:
+ options_obj.base_url = ['https://api.pub.build.mozilla.org/tooltool/']
+
+ # ensure all URLs have a trailing slash
+ def add_slash(url):
+ return url if url.endswith('/') else (url + '/')
+ options_obj.base_url = [add_slash(u) for u in options_obj.base_url]
+
+ # expand ~ in --authentication-file
+ if options_obj.auth_file:
+ options_obj.auth_file = os.path.expanduser(options_obj.auth_file)
+
+ # Dictionaries are easier to work with
+ options = vars(options_obj)
+
+ log.setLevel(options['loglevel'])
+
+ # Set up logging, for now just to the console
+ if not _skip_logging: # pragma: no cover
+ ch = logging.StreamHandler()
+ cf = logging.Formatter("%(levelname)s - %(message)s")
+ ch.setFormatter(cf)
+ log.addHandler(ch)
+
+ if options['algorithm'] != 'sha512':
+ parser.error('only --algorithm sha512 is supported')
+
+ if len(args) < 1:
+ parser.error('You must specify a command')
+
+ return 0 if process_command(options, args) else 1
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(main(sys.argv))
diff --git a/testing/docker/recipes/ubuntu1204-test-system-setup.sh b/testing/docker/recipes/ubuntu1204-test-system-setup.sh
new file mode 100644
index 000000000..4edcf00a1
--- /dev/null
+++ b/testing/docker/recipes/ubuntu1204-test-system-setup.sh
@@ -0,0 +1,279 @@
+#!/usr/bin/env bash
+
+set -ve
+
+test `whoami` == 'root'
+
+mkdir -p /setup
+cd /setup
+
+apt_packages=()
+
+apt_packages+=('alsa-base')
+apt_packages+=('alsa-utils')
+apt_packages+=('autoconf2.13')
+apt_packages+=('bluez-alsa')
+apt_packages+=('bluez-alsa:i386')
+apt_packages+=('bluez-cups')
+apt_packages+=('bluez-gstreamer')
+apt_packages+=('build-essential')
+apt_packages+=('ca-certificates')
+apt_packages+=('ccache')
+apt_packages+=('curl')
+apt_packages+=('fonts-kacst')
+apt_packages+=('fonts-kacst-one')
+apt_packages+=('fonts-liberation')
+apt_packages+=('fonts-stix')
+apt_packages+=('fonts-unfonts-core')
+apt_packages+=('fonts-unfonts-extra')
+apt_packages+=('fonts-vlgothic')
+apt_packages+=('g++-multilib')
+apt_packages+=('gcc-multilib')
+apt_packages+=('gir1.2-gnomebluetooth-1.0')
+apt_packages+=('git')
+apt_packages+=('gstreamer0.10-alsa')
+apt_packages+=('gstreamer0.10-ffmpeg')
+apt_packages+=('gstreamer0.10-plugins-bad')
+apt_packages+=('gstreamer0.10-plugins-base')
+apt_packages+=('gstreamer0.10-plugins-good')
+apt_packages+=('gstreamer0.10-plugins-ugly')
+apt_packages+=('gstreamer0.10-tools')
+apt_packages+=('language-pack-en-base')
+apt_packages+=('libasound2-dev')
+apt_packages+=('libasound2-plugins:i386')
+apt_packages+=('libcanberra-pulse')
+apt_packages+=('libcurl4-openssl-dev')
+apt_packages+=('libdbus-1-dev')
+apt_packages+=('libdbus-glib-1-dev')
+apt_packages+=('libdrm-intel1:i386')
+apt_packages+=('libdrm-nouveau1a:i386')
+apt_packages+=('libdrm-radeon1:i386')
+apt_packages+=('libdrm2:i386')
+apt_packages+=('libexpat1:i386')
+apt_packages+=('libgconf2-dev')
+apt_packages+=('libgnome-bluetooth8')
+apt_packages+=('libgstreamer-plugins-base0.10-dev')
+apt_packages+=('libgstreamer0.10-dev')
+apt_packages+=('libgtk2.0-dev')
+apt_packages+=('libiw-dev')
+apt_packages+=('libllvm2.9')
+apt_packages+=('libllvm3.0:i386')
+apt_packages+=('libncurses5:i386')
+apt_packages+=('libnotify-dev')
+apt_packages+=('libpulse-dev')
+apt_packages+=('libpulse-mainloop-glib0:i386')
+apt_packages+=('libpulsedsp:i386')
+apt_packages+=('libsdl1.2debian:i386')
+apt_packages+=('libsox-fmt-alsa')
+apt_packages+=('libx11-xcb1:i386')
+apt_packages+=('libxdamage1:i386')
+apt_packages+=('libxfixes3:i386')
+apt_packages+=('libxt-dev')
+apt_packages+=('libxxf86vm1')
+apt_packages+=('libxxf86vm1:i386')
+apt_packages+=('llvm')
+apt_packages+=('llvm-2.9')
+apt_packages+=('llvm-2.9-dev')
+apt_packages+=('llvm-2.9-runtime')
+apt_packages+=('llvm-dev')
+apt_packages+=('llvm-runtime')
+apt_packages+=('nano')
+apt_packages+=('pulseaudio')
+apt_packages+=('pulseaudio-module-x11')
+apt_packages+=('pulseaudio-module-bluetooth')
+apt_packages+=('pulseaudio-module-gconf')
+apt_packages+=('rlwrap')
+apt_packages+=('screen')
+apt_packages+=('software-properties-common')
+apt_packages+=('sudo')
+apt_packages+=('tar')
+apt_packages+=('ttf-arphic-uming')
+apt_packages+=('ttf-dejavu')
+apt_packages+=('ttf-indic-fonts-core')
+apt_packages+=('ttf-kannada-fonts')
+apt_packages+=('ttf-oriya-fonts')
+apt_packages+=('ttf-paktype')
+apt_packages+=('ttf-punjabi-fonts')
+apt_packages+=('ttf-sazanami-mincho')
+apt_packages+=('ubuntu-desktop')
+apt_packages+=('unzip')
+apt_packages+=('uuid')
+apt_packages+=('vim')
+apt_packages+=('wget')
+apt_packages+=('xvfb')
+apt_packages+=('yasm')
+apt_packages+=('zip')
+
+# get xvinfo for test-linux.sh to monitor Xvfb startup
+apt_packages+=('x11-utils')
+
+# Bug 1232407 - this allows the user to start vnc
+apt_packages+=('x11vnc')
+
+# Bug 1176031: need `xset` to disable screensavers
+apt_packages+=('x11-xserver-utils')
+
+# use Ubuntu's Python-2.7 (2.7.3 on Precise)
+apt_packages+=('python-dev')
+apt_packages+=('python-pip')
+
+apt-get update
+# This allows ubuntu-desktop to be installed without human interaction
+export DEBIAN_FRONTEND=noninteractive
+apt-get install -y --force-yes ${apt_packages[@]}
+
+dpkg-reconfigure locales
+
+tooltool_fetch() {
+ cat >manifest.tt
+ python /setup/tooltool.py fetch
+ rm manifest.tt
+}
+
+. /tmp/install-mercurial.sh
+
+# install peep
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 26912,
+ "digest": "9d730ed7852d4d217aaddda959cd5f871ef1b26dd6c513a3780bbb04a5a93a49d6b78e95c2274451a1311c10cc0a72755b269dc9af62640474e6e73a1abec370",
+ "algorithm": "sha512",
+ "filename": "peep-2.4.1.tar.gz",
+ "unpack": false
+}
+]
+EOF
+pip install peep-2.4.1.tar.gz
+
+# remaining Python utilities are installed with `peep` from upstream
+# repositories; peep verifies file integrity for us
+cat >requirements.txt <<'EOF'
+# wheel
+# sha256: 90pZQ6kAXB6Je8-H9-ivfgDAb6l3e5rWkfafn6VKh9g
+# tarball:
+# sha256: qryO8YzdvYoqnH-SvEPi_qVLEUczDWXbkg7zzpgS49w
+virtualenv==13.1.2
+EOF
+peep install -r requirements.txt
+
+# Install node
+wget https://nodejs.org/dist/v5.0.0/node-v5.0.0-linux-x64.tar.gz
+echo 'ef73b59048a0ed11d01633f0061627b7a9879257deb9add2255e4d0808f8b671 node-v5.0.0-linux-x64.tar.gz' | sha256sum -c
+tar -C /usr/local -xz --strip-components 1 < node-v5.0.0-linux-x64.tar.gz
+node -v # verify
+
+# Install custom-built Debian packages. These come from a set of repositories
+# packaged in tarballs on tooltool to make them replicable. Because they have
+# inter-dependencies, we install all repositories first, then perform the
+# installation.
+cp /etc/apt/sources.list sources.list.orig
+
+# Install a slightly newer version of libxcb
+# See bugs 975216 and 1334641 for the original build of these packages
+# NOTE: if you're re-creating this, the tarball contains an `update.sh` which will rebuild the repository.
+tooltool_fetch <<'EOF'
+[
+ {
+ "size": 9711517,
+ "visibility": "public",
+ "digest": "ecbcebfb409ad9f7f2a9b6b058e20d49e45b3fd5d94dac59e94ff9a54844611f715230468af506a10a5cd62df6df74fdf0e126d43f6bec743eb803ded0740da7",
+ "algorithm": "sha512",
+ "filename": "xcb-repo-1.8.1-2ubuntu2.1mozilla2.tgz"
+ }
+]
+EOF
+tar -zxf xcb-repo-*.tgz
+echo "deb file://$PWD/xcb precise all" >> /etc/apt/sources.list
+
+# Install a patched version of mesa, per bug 1227637. Origin of the packages themselves is unknown, as
+# these binaries were copied from the apt repositories used by puppet. Ask rail for more information.
+# NOTE: if you're re-creating this, the tarball contains an `update.sh` which will rebuild the repository.
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 590643702,
+ "visibility": "public",
+ "digest": "f03b11987c218e57073d1b7eec6cc0a753d48f600df8dde0a35fa7c4d4d30b3891c9cbcaee38ade23f038e72951cb15f0dca3f7c76cbf5bad5526baf13e91929",
+ "algorithm": "sha512",
+ "filename": "mesa-repo-9.2.1-1ubuntu3~precise1mozilla2.tgz"
+}
+]
+EOF
+tar -zxf mesa-repo-*.tgz
+echo "deb file://$PWD/mesa precise all" >> /etc/apt/sources.list
+
+# Install Valgrind (trunk, late Jan 2016) and do some crude sanity
+# checks. It has to go in /usr/local, otherwise it won't work. Copy
+# the launcher binary to /usr/bin, though, so that direct invocations
+# of /usr/bin/valgrind also work. Also install libc6-dbg since
+# Valgrind won't work at all without the debug symbols for libc.so and
+# ld.so being available.
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 41331092,
+ "visibility": "public",
+ "digest": "a89393c39171b8304fc262094a650df9a756543ffe9fbec935911e7b86842c4828b9b831698f97612abb0eca95cf7f7b3ff33ea7a9b0313b30c9be413a5efffc",
+ "algorithm": "sha512",
+ "filename": "valgrind-15775-3206-ubuntu1204.tgz"
+}
+]
+EOF
+cp valgrind-15775-3206-ubuntu1204.tgz /tmp
+(cd / && tar xzf /tmp/valgrind-15775-3206-ubuntu1204.tgz)
+rm /tmp/valgrind-15775-3206-ubuntu1204.tgz
+cp /usr/local/bin/valgrind /usr/bin/valgrind
+apt-get install -y libc6-dbg
+valgrind --version
+valgrind date
+
+# Fetch the minidump_stackwalk binary specified by the in-tree tooltool manifest.
+python /setup/tooltool.py fetch -m /tmp/minidump_stackwalk.manifest
+rm /tmp/minidump_stackwalk.manifest
+mv linux64-minidump_stackwalk /usr/local/bin/
+chmod +x /usr/local/bin/linux64-minidump_stackwalk
+
+apt-get update
+
+apt-get -q -y --force-yes install \
+ libxcb1 \
+ libxcb-render0 \
+ libxcb-shm0 \
+ libxcb-glx0 \
+ libxcb-shape0 libxcb-glx0:i386
+libxcb1_version=$(dpkg-query -s libxcb1 | grep ^Version | awk '{ print $2 }')
+[ "$libxcb1_version" = "1.8.1-2ubuntu2.1mozilla2" ] || exit 1
+
+apt-get -q -y --force-yes install \
+ libgl1-mesa-dev-lts-saucy:i386 \
+ libgl1-mesa-dri-lts-saucy \
+ libgl1-mesa-dri-lts-saucy:i386 \
+ libgl1-mesa-glx-lts-saucy \
+ libgl1-mesa-glx-lts-saucy:i386 \
+ libglapi-mesa-lts-saucy \
+ libglapi-mesa-lts-saucy:i386 \
+ libxatracker1-lts-saucy \
+ mesa-common-dev-lts-saucy:i386
+mesa_version=$(dpkg-query -s libgl1-mesa-dri-lts-saucy | grep ^Version | awk '{ print $2 }')
+[ "$mesa_version" = "9.2.1-1ubuntu3~precise1mozilla2" ] || exit 1
+
+# revert the list of repos
+cp sources.list.orig /etc/apt/sources.list
+apt-get update
+
+# node 5 requires a C++11 compiler.
+add-apt-repository ppa:ubuntu-toolchain-r/test
+apt-get update
+apt-get -y install gcc-4.8 g++-4.8
+update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 20 --slave /usr/bin/g++ g++ /usr/bin/g++-4.8
+update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.6 10 --slave /usr/bin/g++ g++ /usr/bin/g++-4.6
+
+# clean up
+
+cd /
+rm -rf /setup ~/.ccache ~/.cache ~/.npm
+apt-get clean
+apt-get autoclean
+rm -f $0
diff --git a/testing/docker/recipes/ubuntu1604-test-system-setup.sh b/testing/docker/recipes/ubuntu1604-test-system-setup.sh
new file mode 100644
index 000000000..b58ee7cb1
--- /dev/null
+++ b/testing/docker/recipes/ubuntu1604-test-system-setup.sh
@@ -0,0 +1,180 @@
+#!/usr/bin/env bash
+
+set -ve
+
+test `whoami` == 'root'
+
+mkdir -p /setup
+cd /setup
+
+apt_packages=()
+
+apt_packages+=('alsa-base')
+apt_packages+=('alsa-utils')
+apt_packages+=('autoconf2.13')
+apt_packages+=('bluez-cups')
+apt_packages+=('build-essential')
+apt_packages+=('ca-certificates')
+apt_packages+=('ccache')
+apt_packages+=('curl')
+apt_packages+=('fonts-kacst')
+apt_packages+=('fonts-kacst-one')
+apt_packages+=('fonts-liberation')
+apt_packages+=('fonts-stix')
+apt_packages+=('fonts-unfonts-core')
+apt_packages+=('fonts-unfonts-extra')
+apt_packages+=('fonts-vlgothic')
+apt_packages+=('g++-multilib')
+apt_packages+=('gcc-multilib')
+apt_packages+=('gir1.2-gnomebluetooth-1.0')
+apt_packages+=('git')
+apt_packages+=('gstreamer0.10-alsa')
+apt_packages+=('gstreamer0.10-plugins-base')
+apt_packages+=('gstreamer0.10-plugins-good')
+apt_packages+=('gstreamer0.10-tools')
+apt_packages+=('language-pack-en-base')
+apt_packages+=('libasound2-dev')
+apt_packages+=('libcanberra-pulse')
+apt_packages+=('libcurl4-openssl-dev')
+apt_packages+=('libdbus-1-dev')
+apt_packages+=('libdbus-glib-1-dev')
+apt_packages+=('libgconf2-dev')
+apt_packages+=('libgstreamer-plugins-base0.10-dev')
+apt_packages+=('libgstreamer0.10-dev')
+apt_packages+=('libgtk2.0-dev')
+apt_packages+=('libiw-dev')
+apt_packages+=('libnotify-dev')
+apt_packages+=('libpulse-dev')
+apt_packages+=('libsox-fmt-alsa')
+apt_packages+=('libxt-dev')
+apt_packages+=('libxxf86vm1')
+apt_packages+=('llvm')
+apt_packages+=('llvm-dev')
+apt_packages+=('llvm-runtime')
+apt_packages+=('nano')
+apt_packages+=('pulseaudio')
+apt_packages+=('pulseaudio-module-bluetooth')
+apt_packages+=('pulseaudio-module-gconf')
+apt_packages+=('rlwrap')
+apt_packages+=('screen')
+apt_packages+=('software-properties-common')
+apt_packages+=('sudo')
+apt_packages+=('tar')
+apt_packages+=('ttf-dejavu')
+apt_packages+=('ubuntu-desktop')
+apt_packages+=('unzip')
+apt_packages+=('uuid')
+apt_packages+=('vim')
+apt_packages+=('wget')
+apt_packages+=('xvfb')
+apt_packages+=('yasm')
+apt_packages+=('zip')
+
+# get xvinfo for test-linux.sh to monitor Xvfb startup
+apt_packages+=('x11-utils')
+
+# Bug 1232407 - this allows the user to start vnc
+apt_packages+=('x11vnc')
+
+# Bug 1176031: need `xset` to disable screensavers
+apt_packages+=('x11-xserver-utils')
+
+# use Ubuntu's Python 2.7
+apt_packages+=('python-dev')
+apt_packages+=('python-pip')
+
+apt-get update
+# This allows ubuntu-desktop to be installed without human interaction
+export DEBIAN_FRONTEND=noninteractive
+apt-get install -y -f ${apt_packages[@]}
+
+dpkg-reconfigure locales
+
+. /setup/common.sh
+. /setup/install-mercurial.sh
+
+pip install --upgrade pip
+
+pip install virtualenv
+
+# Install node
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 5676610,
+ "digest": "ce27b788dfd141a5ba7674332825fc136fe2c4f49a319dd19b3a87c8fffa7a97d86cbb8535661c9a68c9122719aa969fc6a8c886458a0df9fc822eec99ed130b",
+ "algorithm": "sha512",
+ "filename": "node-v0.10.36-linux-x64.tar.gz"
+}
+]
+
+EOF
+tar -C /usr/local -xz --strip-components 1 < node-*.tar.gz
+node -v # verify
+
+# Install custom-built Debian packages. These come from a set of repositories
+# packaged in tarballs on tooltool to make them replicable. Because they have
+# inter-dependencies, we install all repositories first, then perform the
+# installation.
+cp /etc/apt/sources.list sources.list.orig
+
+# Install Valgrind (trunk, late Jan 2016) and do some crude sanity
+# checks. It has to go in /usr/local, otherwise it won't work. Copy
+# the launcher binary to /usr/bin, though, so that direct invocations
+# of /usr/bin/valgrind also work. Also install libc6-dbg since
+# Valgrind won't work at all without the debug symbols for libc.so and
+# ld.so being available.
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 41331092,
+ "visibility": "public",
+ "digest": "a89393c39171b8304fc262094a650df9a756543ffe9fbec935911e7b86842c4828b9b831698f97612abb0eca95cf7f7b3ff33ea7a9b0313b30c9be413a5efffc",
+ "algorithm": "sha512",
+ "filename": "valgrind-15775-3206-ubuntu1204.tgz"
+}
+]
+EOF
+cp valgrind-15775-3206-ubuntu1204.tgz /tmp
+(cd / && tar xzf /tmp/valgrind-15775-3206-ubuntu1204.tgz)
+rm /tmp/valgrind-15775-3206-ubuntu1204.tgz
+cp /usr/local/bin/valgrind /usr/bin/valgrind
+apt-get install -y libc6-dbg
+valgrind --version
+valgrind date
+
+# Fetch the minidump_stackwalk binary specified by the in-tree tooltool manifest.
+python /setup/tooltool.py fetch -m /tmp/minidump_stackwalk.manifest
+rm /tmp/minidump_stackwalk.manifest
+mv linux64-minidump_stackwalk /usr/local/bin/
+chmod +x /usr/local/bin/linux64-minidump_stackwalk
+
+# adding multiverse to get 'ubuntu-restricted-extras' below
+apt-add-repository multiverse
+apt-get update
+
+# for mp4 codec (used in MSE tests)
+apt-get -q -y -f install ubuntu-restricted-extras
+
+apt-get -q -y -f install \
+ libxcb1 \
+ libxcb-render0 \
+ libxcb-shm0 \
+ libxcb-glx0 \
+ libxcb-shape0
+
+apt-get -q -y -f install \
+ libgl1-mesa-dri \
+ libgl1-mesa-glx \
+ mesa-common-dev
+
+# revert the list of repos
+cp sources.list.orig /etc/apt/sources.list
+apt-get update
+
+# clean up
+cd /
+rm -rf /setup ~/.ccache ~/.cache ~/.npm
+apt-get clean
+apt-get autoclean
+rm -f $0
diff --git a/testing/docker/recipes/xvfb.sh b/testing/docker/recipes/xvfb.sh
new file mode 100644
index 000000000..6e0e79f7d
--- /dev/null
+++ b/testing/docker/recipes/xvfb.sh
@@ -0,0 +1,75 @@
+#! /bin/bash -x
+
+set -x
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[xvfb.sh:error]" "${@}"
+ exit 1
+}
+
+cleanup_xvfb() {
+ # If this script was invoked with START_VNC or TASKCLUSTER_INTERACTIVE
+ # set, leave Xvfb running so the user does not lose their connection
+ local xvfb_pid=`pidof Xvfb`
+ local vnc=${START_VNC:-false}
+ local interactive=${TASKCLUSTER_INTERACTIVE:-false}
+ if [ -n "$xvfb_pid" ] && [[ $vnc == false ]] && [[ $interactive == false ]] ; then
+ kill $xvfb_pid || true
+ screen -XS xvfb quit || true
+ fi
+}
+
+# Attempt to start Xvfb in a screen session with the given resolution and
+# display number. Up to 5 attempts will be made, with a short delay
+# between retries.
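+# e.g. try_xvfb "1280x1024x24" 0 attempts to start Xvfb on display :0 at
+# that resolution (an illustrative invocation; start_xvfb passes these in).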
+try_xvfb() {
+ screen -dmS xvfb Xvfb :$2 -nolisten tcp -screen 0 $1 \
+ > ~/artifacts/xvfb/xvfb.log 2>&1
+ export DISPLAY=:$2
+
+ # Only error code 255 matters, because it signifies that no
+ # display could be opened. As long as we can open the display
+ # tests should work. We'll retry a few times with a sleep before
+ # failing.
+ local retry_count=0
+ local max_retries=5
+ xvfb_test=0
+ until [ $retry_count -gt $max_retries ]; do
+ xvinfo || xvfb_test=$?
+ if [ $xvfb_test != 255 ]; then
+ retry_count=$(($max_retries + 1))
+ else
+ retry_count=$(($retry_count + 1))
+ echo "Failed to start Xvfb, retry: $retry_count"
+ sleep 2
+ fi
+ done
+ if [ $xvfb_test == 255 ]; then
+ return 1
+ else
+ return 0
+ fi
+}
+
+start_xvfb() {
+ set +e
+ mkdir -p ~/artifacts/xvfb
+ local retry_count=0
+ local max_retries=2
+ local success=1
+ until [ $retry_count -gt $max_retries ]; do
+ try_xvfb $1 $2
+ success=$?
+ if [ $success -eq 0 ]; then
+ retry_count=$(($max_retries + 1))
+ else
+ retry_count=$(($retry_count + 1))
+ sleep 10
+ fi
+ done
+ set -e
+ if [ $success -eq 1 ]; then
+ fail "Could not start Xvfb after $((max_retries + 1)) attempts"
+ fi
+}
diff --git a/testing/docker/rust-build/Dockerfile b/testing/docker/rust-build/Dockerfile
new file mode 100644
index 000000000..45d64def5
--- /dev/null
+++ b/testing/docker/rust-build/Dockerfile
@@ -0,0 +1,37 @@
+FROM quay.io/rust/rust-buildbot
+MAINTAINER Ralph Giles <giles@mozilla.com>
+
+# Reset user/workdir from parent image so we can install software.
+WORKDIR /
+USER root
+
+# Update base.
+RUN yum upgrade -y
+RUN yum clean all
+
+# Install tooltool directly from github.
+RUN mkdir /builds
+ADD https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py /build/tooltool.py
+RUN chmod +rx /build/tooltool.py
+
+# Add build scripts.
+ADD fetch_rust.sh build_rust.sh /build/
+ADD fetch_cargo.sh build_cargo.sh /build/
+ADD package_rust.sh upload_rust.sh /build/
+ADD repack_rust.py /build/
+RUN chmod +x /build/*
+
+# Create user for doing the build.
+ENV USER worker
+ENV HOME /home/${USER}
+
+RUN useradd -d ${HOME} -m ${USER}
+
+# Set up the user's tree
+WORKDIR ${HOME}
+
+# Invoke our build scripts by default, but allow other commands.
+USER ${USER}
+ENTRYPOINT /build/fetch_rust.sh && /build/build_rust.sh && \
+ /build/fetch_cargo.sh && /build/build_cargo.sh && \
+ /build/package_rust.sh && /build/upload_rust.sh
diff --git a/testing/docker/rust-build/README.md b/testing/docker/rust-build/README.md
new file mode 100644
index 000000000..3241051ec
--- /dev/null
+++ b/testing/docker/rust-build/README.md
@@ -0,0 +1,2 @@
+This is a Docker recipe for building Rust toolchains for
+use in Mozilla's build clusters.
diff --git a/testing/docker/rust-build/REGISTRY b/testing/docker/rust-build/REGISTRY
new file mode 100644
index 000000000..e32374498
--- /dev/null
+++ b/testing/docker/rust-build/REGISTRY
@@ -0,0 +1 @@
+quay.io/rust
diff --git a/testing/docker/rust-build/VERSION b/testing/docker/rust-build/VERSION
new file mode 100644
index 000000000..0d91a54c7
--- /dev/null
+++ b/testing/docker/rust-build/VERSION
@@ -0,0 +1 @@
+0.3.0
diff --git a/testing/docker/rust-build/build_cargo.sh b/testing/docker/rust-build/build_cargo.sh
new file mode 100644
index 000000000..33b56e6f9
--- /dev/null
+++ b/testing/docker/rust-build/build_cargo.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -vex
+
+set -x -e
+
+: WORKSPACE ${WORKSPACE:=/home/worker}
+
+set -v
+
+# Configure and build cargo.
+
+if test $(uname -s) = "Darwin"; then
+ export MACOSX_DEPLOYMENT_TARGET=10.7
+fi
+
+pushd ${WORKSPACE}/cargo
+./configure --prefix=${WORKSPACE}/rustc --local-rust-root=${WORKSPACE}/rustc
+make
+make dist
+make install
+popd
diff --git a/testing/docker/rust-build/build_rust.sh b/testing/docker/rust-build/build_rust.sh
new file mode 100644
index 000000000..2f57128ae
--- /dev/null
+++ b/testing/docker/rust-build/build_rust.sh
@@ -0,0 +1,26 @@
+#!/bin/bash -vex
+
+set -x -e
+
+: WORKSPACE ${WORKSPACE:=/home/worker}
+
+CORES=$(nproc || grep -c ^processor /proc/cpuinfo || sysctl -n hw.ncpu)
+
+set -v
+
+# Configure and build rust.
+OPTIONS="--enable-llvm-static-stdcpp --disable-docs"
+OPTIONS+=" --enable-debuginfo"
+OPTIONS+=" --release-channel=stable"
+x32="i686-unknown-linux-gnu"
+x64="x86_64-unknown-linux-gnu"
+arm="arm-linux-androideabi"
+
+mkdir -p ${WORKSPACE}/rust-build
+pushd ${WORKSPACE}/rust-build
+${WORKSPACE}/rust/configure --prefix=${WORKSPACE}/rustc \
+ --target=${x64},${x32} ${OPTIONS}
+make -j ${CORES}
+make dist
+make install
+popd
diff --git a/testing/docker/rust-build/build_rust_mac.sh b/testing/docker/rust-build/build_rust_mac.sh
new file mode 100644
index 000000000..a6d44f6dd
--- /dev/null
+++ b/testing/docker/rust-build/build_rust_mac.sh
@@ -0,0 +1,36 @@
+#!/bin/bash -vex
+
+set -e
+
+: WORKSPACE ${WORKSPACE:=$PWD}
+: TOOLTOOL ${TOOLTOOL:=python $WORKSPACE/tooltool.py}
+
+CORES=$(nproc || grep -c ^processor /proc/cpuinfo || sysctl -n hw.ncpu)
+echo Building on $CORES cpus...
+
+OPTIONS="--enable-debuginfo --disable-docs"
+TARGETS="x86_64-apple-darwin,i686-apple-darwin"
+
+PREFIX=${WORKSPACE}/rustc
+
+set -v
+
+mkdir -p ${WORKSPACE}/gecko-rust-mac
+pushd ${WORKSPACE}/gecko-rust-mac
+
+export MACOSX_DEPLOYMENT_TARGET=10.7
+${WORKSPACE}/rust/configure --prefix=${PREFIX} --target=${TARGETS} ${OPTIONS}
+make -j ${CORES}
+
+rm -rf ${PREFIX}
+mkdir ${PREFIX}
+make dist
+make install
+popd
+
+# Package the toolchain for upload.
+pushd ${WORKSPACE}
+rustc/bin/rustc --version
+tar cvjf rustc.tar.bz2 rustc/*
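+# `tooltool add` records the tarball in ./manifest.tt (tooltool's default
+# manifest) so a later step can upload it.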
+${TOOLTOOL} add --visibility=public --unpack rustc.tar.bz2
+popd
diff --git a/testing/docker/rust-build/fetch_cargo.sh b/testing/docker/rust-build/fetch_cargo.sh
new file mode 100644
index 000000000..c0fdb65d3
--- /dev/null
+++ b/testing/docker/rust-build/fetch_cargo.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+: REPOSITORY ${REPOSITORY:=https://github.com/rust-lang/cargo}
+: BRANCH ${BRANCH:=master}
+
+: WORKSPACE ${WORKSPACE:=/home/worker}
+
+set -v
+
+# Check out cargo sources
+SRCDIR=${WORKSPACE}/cargo
+git clone --recursive $REPOSITORY -b $BRANCH ${SRCDIR}
+
+# Report version
+VERSION=$(git -C ${SRCDIR} describe --tags --dirty)
+COMMIT=$(git -C ${SRCDIR} rev-parse HEAD)
+echo "cargo ${VERSION} (commit ${COMMIT})" | tee cargo-version
diff --git a/testing/docker/rust-build/fetch_rust.sh b/testing/docker/rust-build/fetch_rust.sh
new file mode 100644
index 000000000..69a0d9bd9
--- /dev/null
+++ b/testing/docker/rust-build/fetch_rust.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+: RUST_REPOSITORY ${RUST_REPOSITORY:=https://github.com/rust-lang/rust}
+: RUST_BRANCH ${RUST_BRANCH:=stable}
+
+: WORKSPACE ${WORKSPACE:=/home/worker}
+
+set -v
+
+# Check out rust sources
+git clone $RUST_REPOSITORY -b $RUST_BRANCH ${WORKSPACE}/rust
+
+# Report version
+VERSION=$(git -C ${WORKSPACE}/rust describe --tags --dirty)
+COMMIT=$(git -C ${WORKSPACE}/rust rev-parse HEAD)
+echo "rust ${VERSION} (commit ${COMMIT})" | tee rust-version
diff --git a/testing/docker/rust-build/package_rust.sh b/testing/docker/rust-build/package_rust.sh
new file mode 100644
index 000000000..743aec2fb
--- /dev/null
+++ b/testing/docker/rust-build/package_rust.sh
@@ -0,0 +1,13 @@
+#!/bin/bash -vex
+
+set -x -e
+
+: WORKSPACE ${WORKSPACE:=/home/worker}
+
+set -v
+
+# Package the toolchain for upload.
+pushd ${WORKSPACE}
+tar cvJf rustc.tar.xz rustc/*
+/build/tooltool.py add --visibility=public --unpack rustc.tar.xz
+popd
diff --git a/testing/docker/rust-build/repack_rust.py b/testing/docker/rust-build/repack_rust.py
new file mode 100644
index 000000000..e0a5e89c5
--- /dev/null
+++ b/testing/docker/rust-build/repack_rust.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+'''
+This script downloads and repacks official rust language builds
+with the necessary tool and target support for the Firefox
+build environment.
+'''
+
+import os.path
+import requests
+import subprocess
+import toml
+
+def fetch_file(url):
+ '''Download a file from the given url if it's not already present.'''
+ filename = os.path.basename(url)
+ if os.path.exists(filename):
+ return
+ r = requests.get(url, stream=True)
+ r.raise_for_status()
+ with open(filename, 'wb') as fd:
+ for chunk in r.iter_content(4096):
+ fd.write(chunk)
+
+def fetch(url):
+ '''Download and verify a package url.'''
+ base = os.path.basename(url)
+ print('Fetching %s...' % base)
+ fetch_file(url + '.asc')
+ fetch_file(url)
+ fetch_file(url + '.sha256')
+ fetch_file(url + '.asc.sha256')
+ print('Verifying %s...' % base)
+ subprocess.check_call(['shasum', '-c', base + '.sha256'])
+ subprocess.check_call(['shasum', '-c', base + '.asc.sha256'])
+ subprocess.check_call(['gpg', '--verify', base + '.asc', base])
+ subprocess.check_call(['keybase', 'pgp', 'verify',
+ '-d', base + '.asc',
+ '-i', base,
+ ])
+
+def install(filename, target):
+ '''Run a package's installer script against the given target directory.'''
+ print(' Unpacking %s...' % filename)
+ subprocess.check_call(['tar', 'xf', filename])
+ basename = filename.split('.tar')[0]
+ print(' Installing %s...' % basename)
+ install_cmd = [os.path.join(basename, 'install.sh')]
+ install_cmd += ['--prefix=' + os.path.abspath(target)]
+ install_cmd += ['--disable-ldconfig']
+ subprocess.check_call(install_cmd)
+ print(' Cleaning %s...' % basename)
+ subprocess.check_call(['rm', '-rf', basename])
+
+def package(manifest, pkg, target):
+ '''Pull out the package dict for a particular package and target
+ from the given manifest.'''
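+ # Illustrative manifest shape (abridged; values are examples):
+ # manifest['pkg']['rustc']['version'] -> '1.9.0 (e4e8b6668 2016-05-18)'
+ # manifest['pkg']['rustc']['target'][target] ->
+ # {'url': ..., 'hash': ..., 'available': True}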
+ version = manifest['pkg'][pkg]['version']
+ info = manifest['pkg'][pkg]['target'][target]
+ return (version, info)
+
+def fetch_package(manifest, pkg, host):
+ version, info = package(manifest, pkg, host)
+ print('%s %s\n %s\n %s' % (pkg, version, info['url'], info['hash']))
+ if not info['available']:
+ print('%s marked unavailable for %s' % (pkg, host))
+ raise AssertionError
+ fetch(info['url'])
+ return info
+
+def fetch_std(manifest, targets):
+ stds = []
+ for target in targets:
+ info = fetch_package(manifest, 'rust-std', target)
+ stds.append(info)
+ return stds
+
+def tar_for_host(host):
+ if 'linux' in host:
+ tar_options = 'cJf'
+ tar_ext = '.tar.xz'
+ else:
+ tar_options = 'cjf'
+ tar_ext = '.tar.bz2'
+ return tar_options, tar_ext
+
+def repack(host, targets, channel='stable', suffix=''):
+ print("Repacking rust for %s..." % host)
+ url = 'https://static.rust-lang.org/dist/channel-rust-' + channel + '.toml'
+ req = requests.get(url)
+ req.raise_for_status()
+ manifest = toml.loads(req.content)
+ if manifest['manifest-version'] != '2':
+ print('ERROR: unrecognized manifest version %s.' % manifest['manifest-version'])
+ return
+ print('Using manifest for rust %s as of %s.' % (channel, manifest['date']))
+ print('Fetching packages...')
+ rustc = fetch_package(manifest, 'rustc', host)
+ cargo = fetch_package(manifest, 'cargo', host)
+ stds = fetch_std(manifest, targets)
+ print('Installing packages...')
+ tar_basename = 'rustc-' + host
+ if suffix:
+ tar_basename += '-' + suffix
+ tar_basename += '-repack'
+ install_dir = 'rustc'
+ subprocess.check_call(['rm', '-rf', install_dir])
+ install(os.path.basename(rustc['url']), install_dir)
+ install(os.path.basename(cargo['url']), install_dir)
+ for std in stds:
+ install(os.path.basename(std['url']), install_dir)
+ print('Tarring %s...' % tar_basename)
+ tar_options, tar_ext = tar_for_host(host)
+ subprocess.check_call(['tar', tar_options, tar_basename + tar_ext, install_dir])
+ subprocess.check_call(['rm', '-rf', install_dir])
+
+def repack_cargo(host, channel='nightly'):
+ print("Repacking cargo for %s..." % host)
+ # Cargo doesn't seem to have a .toml manifest.
+ base_url = 'https://static.rust-lang.org/cargo-dist/'
+ req = requests.get(os.path.join(base_url, 'channel-cargo-' + channel))
+ req.raise_for_status()
+ file = ''
+ for line in req.iter_lines():
+ if line.find(host) != -1:
+ file = line.strip()
+ if not file:
+ print('No manifest entry for %s!' % host)
+ return
+ manifest = {
+ 'date': req.headers['Last-Modified'],
+ 'pkg': {
+ 'cargo': {
+ 'version': channel,
+ 'target': {
+ host: {
+ 'url': os.path.join(base_url, file),
+ 'hash': None,
+ 'available': True,
+ },
+ },
+ },
+ },
+ }
+ print('Using manifest for cargo %s.' % channel)
+ print('Fetching packages...')
+ cargo = fetch_package(manifest, 'cargo', host)
+ print('Installing packages...')
+ install_dir = 'cargo'
+ subprocess.check_call(['rm', '-rf', install_dir])
+ install(os.path.basename(cargo['url']), install_dir)
+ tar_basename = 'cargo-%s-repack' % host
+ print('Tarring %s...' % tar_basename)
+ tar_options, tar_ext = tar_for_host(host)
+ subprocess.check_call(['tar', tar_options, tar_basename + tar_ext, install_dir])
+ subprocess.check_call(['rm', '-rf', install_dir])
+
+# rust platform triples
+android="armv7-linux-androideabi"
+linux64="x86_64-unknown-linux-gnu"
+linux32="i686-unknown-linux-gnu"
+mac64="x86_64-apple-darwin"
+mac32="i686-apple-darwin"
+win64="x86_64-pc-windows-msvc"
+win32="i686-pc-windows-msvc"
+
+if __name__ == '__main__':
+ repack(mac64, [mac64, mac32])
+ repack(win32, [win32])
+ repack(win64, [win64])
+ repack(linux64, [linux64, linux32])
+ repack(linux64, [linux64, mac64, mac32], suffix='mac-cross')
+ repack(linux64, [linux64, android], suffix='android-cross')
+ repack_cargo(mac64)
+ repack_cargo(win32)
+ repack_cargo(win64)
+ repack_cargo(linux64)
diff --git a/testing/docker/rust-build/task.json b/testing/docker/rust-build/task.json
new file mode 100644
index 000000000..fd1ab872b
--- /dev/null
+++ b/testing/docker/rust-build/task.json
@@ -0,0 +1,37 @@
+{
+ "provisionerId": "aws-provisioner-v1",
+ "workerType": "rustbuild",
+ "created": "{task_created}",
+ "deadline": "{task_deadline}",
+ "payload": {
+ "image": "quay.io/rust/gecko-rust-build",
+ "env": {
+ "RUST_BRANCH": "{rust_branch}"
+ },
+ "artifacts": {
+ "public/rustc.tar.xz": {
+ "path": "/home/worker/rustc.tar.xz",
+ "expires": "{artifacts_expires}",
+ "type": "file"
+ },
+ "public/manifest.tt": {
+ "path": "/home/worker/manifest.tt",
+ "expires": "{artifacts_expires}",
+ "type": "file"
+ }
+ },
+ "features": {
+ "relengAPIProxy": true
+ },
+ "maxRunTime": 6000
+ },
+ "scopes": [
+ "docker-worker:relengapi-proxy:tooltool.upload.public"
+ ],
+ "metadata": {
+ "name": "Rust toolchain build",
+ "description": "Builds the rust toolchain for use in gecko builders.",
+ "owner": "giles@mozilla.com",
+ "source": "https://github.com/rillian/rust-build/"
+ }
+}
diff --git a/testing/docker/rust-build/tcbuild.py b/testing/docker/rust-build/tcbuild.py
new file mode 100644
index 000000000..d55c6f3a7
--- /dev/null
+++ b/testing/docker/rust-build/tcbuild.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+'''
+This script triggers a taskcluster task, waits for it to finish,
+fetches the artifacts, uploads them to tooltool, and updates
+the in-tree tooltool manifests.
+'''
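+# Typical invocation (file and directory names here are illustrative):
+#   python tcbuild.py tc_auth.json --tooltool-auth tooltool_token \
+#          --local-gecko-clone ~/src/gecko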
+
+from __future__ import print_function
+
+import requests.packages.urllib3
+requests.packages.urllib3.disable_warnings()
+
+import argparse
+import datetime
+import json
+import os
+import shutil
+import sys
+import taskcluster
+import tempfile
+import time
+import tooltool
+
+def local_file(filename):
+ '''
+ Return a path to a file next to this script.
+ '''
+ return os.path.join(os.path.dirname(__file__), filename)
+
+def read_tc_auth(tc_auth_file):
+ '''
+ Read taskcluster credentials from tc_auth_file and return them as a dict.
+ '''
+    with open(tc_auth_file, 'rb') as f:
+        return json.load(f)
+
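+# Recursively interpolate '{key}'-style placeholders in a parsed-JSON dict.
+# For example, fill_template_dict({'a': '{x}', 'b': {'c': '{x}!'}}, {'x': '1'})
+# rewrites the dict in place to {'a': '1', 'b': {'c': '1!'}}.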
+def fill_template_dict(d, keys):
+ for key, val in d.items():
+ if isinstance(val, basestring) and '{' in val:
+ d[key] = val.format(**keys)
+ elif isinstance(val, dict):
+ fill_template_dict(val, keys)
+
+def fill_template(template_file, keys):
+ '''
+    Take the file object template_file, parse it as JSON, and
+    interpolate its string values with str.format using keys.
+ '''
+ template = json.load(template_file)
+ fill_template_dict(template, keys)
+ return template
+
+def spawn_task(queue, args):
+ '''
+ Spawn a Taskcluster task in queue using args.
+ '''
+ task_id = taskcluster.utils.slugId()
+ with open(local_file('task.json'), 'rb') as template:
+ keys = vars(args)
+ now = datetime.datetime.utcnow()
+ keys['task_created'] = now.isoformat() + 'Z'
+ keys['task_deadline'] = (now + datetime.timedelta(hours=2)).isoformat() + 'Z'
+ keys['artifacts_expires'] = (now + datetime.timedelta(days=1)).isoformat() + 'Z'
+ payload = fill_template(template, keys)
+ queue.createTask(task_id, payload)
+ print('--- %s task %s submitted ---' % (now, task_id))
+ return task_id
+
+def wait_for_task(queue, task_id, initial_wait=5):
+ '''
+ Wait until queue reports that task task_id is completed, and return
+ its run id.
+
+ Sleep for initial_wait seconds before checking status the first time.
+ Then poll periodically and print a running log of the task status.
+ '''
+ time.sleep(initial_wait)
+ previous_state = None
+ have_ticks = False
+ while True:
+ res = queue.status(task_id)
+ state = res['status']['state']
+ if state != previous_state:
+ now = datetime.datetime.utcnow()
+ if have_ticks:
+ sys.stdout.write('\n')
+ have_ticks = False
+ print('--- %s task %s %s ---' % (now, task_id, state))
+ previous_state = state
+ if state == 'completed':
+ return len(res['status']['runs']) - 1
+ if state in ('failed', 'exception'):
+ raise Exception('Task failed')
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ have_ticks = True
+ time.sleep(10)
+
+def fetch_artifact(queue, task_id, run_id, name, dest_dir):
+ '''
+ Fetch the artifact with name from task_id and run_id in queue,
+ write it to a file in dest_dir, and return the path to the written
+ file.
+ '''
+ url = queue.buildUrl('getArtifact', task_id, run_id, name)
+ fn = os.path.join(dest_dir, os.path.basename(name))
+ print('Fetching %s...' % name)
+ try:
+ r = requests.get(url, stream=True)
+ r.raise_for_status()
+ with open(fn, 'wb') as f:
+ for chunk in r.iter_content(1024):
+ f.write(chunk)
+ except requests.exceptions.HTTPError:
+ print('HTTP Error %d fetching %s' % (r.status_code, name))
+ return None
+ return fn
+
+def make_artifact_dir(task_id, run_id):
+ prefix = 'tc-artifacts.%s.%d.' % (task_id, run_id)
+ print('making artifact dir %s' % prefix)
+ return tempfile.mkdtemp(prefix=prefix)
+
+def fetch_artifacts(queue, task_id, run_id):
+ '''
+ Fetch all artifacts from task_id and run_id in queue, write them to
+ temporary files, and yield the path to each.
+ '''
+    # Create the directory outside the try so the finally block can't see an
+    # unbound tempdir if creation fails.
+    tempdir = make_artifact_dir(task_id, run_id)
+    try:
+        res = queue.listArtifacts(task_id, run_id)
+ for a in res['artifacts']:
+ # Skip logs
+ if a['name'].startswith('public/logs'):
+ continue
+ # Skip interfaces
+ if a['name'].startswith('private/docker-worker'):
+ continue
+ yield fetch_artifact(queue, task_id, run_id, a['name'], tempdir)
+    finally:
+        if os.path.isdir(tempdir):
+            # Cleanup is disabled so the downloaded artifacts can be inspected;
+            # re-enable the rmtree to delete them instead.
+            #shutil.rmtree(tempdir)
+            print('Artifacts downloaded to %s' % tempdir)
+
+def upload_to_tooltool(tooltool_auth, task_id, artifact):
+ '''
+ Upload artifact to tooltool using tooltool_auth as the authentication token.
+ Return the path to the generated tooltool manifest.
+ '''
+ try:
+ oldcwd = os.getcwd()
+ os.chdir(os.path.dirname(artifact))
+ manifest = artifact + '.manifest'
+ tooltool.main([
+ 'tooltool.py',
+ 'add',
+ '--visibility=public',
+ '-m', manifest,
+ artifact
+ ])
+ tooltool.main([
+ 'tooltool.py',
+ 'upload',
+ '-m', manifest,
+ '--authentication-file', tooltool_auth,
+ '--message', 'Built from taskcluster task {}'.format(task_id),
+ ])
+ return manifest
+ finally:
+ os.chdir(oldcwd)
+
+def update_manifest(artifact, manifest, local_gecko_clone):
+    # Only the linux tooltool manifests are updated here.
+    platform = 'linux'
+ manifest_dir = os.path.join(local_gecko_clone,
+ 'testing', 'config', 'tooltool-manifests')
+ platform_dir = [p for p in os.listdir(manifest_dir)
+ if p.startswith(platform)][0]
+ tree_manifest = os.path.join(manifest_dir, platform_dir, 'releng.manifest')
+ print('%s -> %s' % (manifest, tree_manifest))
+ shutil.copyfile(manifest, tree_manifest)
+
+def main():
+ parser = argparse.ArgumentParser(description='Build and upload binaries')
+ parser.add_argument('taskcluster_auth', help='Path to a file containing Taskcluster client ID and authentication token as a JSON file in the form {"clientId": "...", "accessToken": "..."}')
+ parser.add_argument('--tooltool-auth', help='Path to a file containing a tooltool authentication token valid for uploading files')
+ parser.add_argument('--local-gecko-clone', help='Path to a local Gecko clone whose tooltool manifests will be updated with the newly-built binaries')
+ parser.add_argument('--rust-branch', default='stable',
+ help='Revision of the rust repository to use')
+ parser.add_argument('--task', help='Use an existing task')
+
+ args = parser.parse_args()
+ tc_auth = read_tc_auth(args.taskcluster_auth)
+ queue = taskcluster.Queue({'credentials': tc_auth})
+ if args.task:
+ task_id, initial_wait = args.task, 0
+ else:
+ task_id, initial_wait = spawn_task(queue, args), 25
+ run_id = wait_for_task(queue, task_id, initial_wait)
+    for artifact in fetch_artifacts(queue, task_id, run_id):
+        # fetch_artifact returns None when a download fails; skip those.
+        if artifact is None:
+            continue
+        if args.tooltool_auth:
+            manifest = upload_to_tooltool(args.tooltool_auth, task_id, artifact)
+            if args.local_gecko_clone:
+                # The in-tree manifest update needs the tooltool manifest
+                # generated by the upload step above.
+                update_manifest(artifact, manifest, args.local_gecko_clone)
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/docker/rust-build/upload_rust.sh b/testing/docker/rust-build/upload_rust.sh
new file mode 100644
index 000000000..d3a7b634e
--- /dev/null
+++ b/testing/docker/rust-build/upload_rust.sh
@@ -0,0 +1,22 @@
+#!/bin/bash -vex
+
+set -x -e
+
+: WORKSPACE ${WORKSPACE:=/home/worker}
+
+set -v
+
+# Upload artifacts packaged by the build script.
+pushd ${WORKSPACE}
+if test -n "$TASK_ID"; then
+ # If we're running on task cluster, use the upload-capable tunnel.
+ TOOLTOOL_OPTS="--url=http://relengapi/tooltool/"
+ MESSAGE="Taskcluster upload ${TASK_ID}/${RUN_ID} $0"
+else
+ MESSAGE="Rust toolchain build for gecko"
+fi
+if test -r rust-version; then
+ MESSAGE="$MESSAGE $(cat rust-version)"
+fi
+/build/tooltool.py upload ${TOOLTOOL_OPTS} --message="${MESSAGE}"
+popd
diff --git a/testing/docker/tester/Dockerfile b/testing/docker/tester/Dockerfile
new file mode 100644
index 000000000..547417ffa
--- /dev/null
+++ b/testing/docker/tester/Dockerfile
@@ -0,0 +1,33 @@
+FROM taskcluster/base-test:0.1.3
+MAINTAINER Jonas Finnemann Jensen <jopsen@gmail.com>
+
+# Add utilities and configuration
+COPY dot-config /home/worker/.config
+COPY dot-pulse /home/worker/.pulse
+COPY bin /home/worker/bin
+ADD https://s3-us-west-2.amazonaws.com/test-caching/packages/linux64-stackwalk /usr/local/bin/linux64-minidump_stackwalk
+ADD https://raw.githubusercontent.com/taskcluster/buildbot-step/master/buildbot_step /home/worker/bin/buildbot_step
+COPY tc-vcs-config.yml /etc/taskcluster-vcs.yml
+
+# Run test setup script
+RUN chmod u+x /home/worker/bin/buildbot_step
+RUN chmod u+x /usr/local/bin/linux64-minidump_stackwalk
+RUN apt-get install -y python-pip && pip install virtualenv
+RUN mkdir Documents; mkdir Pictures; mkdir Music; mkdir Videos; mkdir artifacts
+RUN npm install -g taskcluster-vcs@2.3.12
+RUN npm install -g taskcluster-npm-cache@1.3.3
+RUN npm install -g node-gyp
+RUN rm -Rf .cache && mkdir -p .cache
+ENV PATH $PATH:/home/worker/bin
+ENV MINIDUMP_STACKWALK /usr/local/bin/linux64-minidump_stackwalk
+
+# Remove once running under 'worker' user. This is necessary for pulseaudio to start
+# XXX: change this back to worker:worker once permissions issues are resolved
+RUN chown -R root:root /home/worker
+
+
+# TODO Re-enable worker when bug 1093833 lands
+#USER worker
+
+# Set a default command useful for debugging
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/tester/REGISTRY b/testing/docker/tester/REGISTRY
new file mode 100644
index 000000000..cb1e1bb48
--- /dev/null
+++ b/testing/docker/tester/REGISTRY
@@ -0,0 +1 @@
+taskcluster
diff --git a/testing/docker/tester/VERSION b/testing/docker/tester/VERSION
new file mode 100644
index 000000000..cb498ab2c
--- /dev/null
+++ b/testing/docker/tester/VERSION
@@ -0,0 +1 @@
+0.4.8
diff --git a/testing/docker/tester/bin/test.sh b/testing/docker/tester/bin/test.sh
new file mode 100644
index 000000000..6a5c9de74
--- /dev/null
+++ b/testing/docker/tester/bin/test.sh
@@ -0,0 +1,31 @@
+#! /bin/bash -vex
+
+set -x -e
+
+: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
+: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+
+
+# TODO: when bug 1093833 is solved and tasks can run as non-root, reduce this
+# to a simple fail-if-root check
+if [ $(id -u) = 0 ]; then
+ chown -R worker:worker /home/worker
+ # drop privileges by re-running this script
+ exec sudo -E -u worker bash /home/worker/bin/test.sh "${@}"
+fi
+
+####
+# Now get the test-linux.sh script from the given Gecko tree and run it with
+# the same arguments.
+####
+
+[ -d $WORKSPACE ] || mkdir -p $WORKSPACE
+cd $WORKSPACE
+
+script=taskcluster/scripts/tester/test-b2g.sh
+url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/${script}
+curl --fail -o ./test-b2g.sh --retry 10 $url
+chmod +x ./test-b2g.sh
+exec ./test-b2g.sh "${@}"
+
diff --git a/testing/docker/tester/dot-config/pip/pip.conf b/testing/docker/tester/dot-config/pip/pip.conf
new file mode 100644
index 000000000..73c2b2a52
--- /dev/null
+++ b/testing/docker/tester/dot-config/pip/pip.conf
@@ -0,0 +1,2 @@
+[global]
+disable-pip-version-check = true
diff --git a/testing/docker/tester/dot-config/user-dirs.dirs b/testing/docker/tester/dot-config/user-dirs.dirs
new file mode 100644
index 000000000..0d19da4e4
--- /dev/null
+++ b/testing/docker/tester/dot-config/user-dirs.dirs
@@ -0,0 +1,15 @@
+# This file is written by xdg-user-dirs-update
+# If you want to change or add directories, just edit the line you're
+# interested in. All local changes will be retained on the next run
+# Format is XDG_xxx_DIR="$HOME/yyy", where yyy is a shell-escaped
+# homedir-relative path, or XDG_xxx_DIR="/yyy", where /yyy is an
+# absolute path. No other format is supported.
+#
+XDG_DESKTOP_DIR="$HOME/Desktop"
+XDG_DOWNLOAD_DIR="$HOME/Downloads"
+XDG_TEMPLATES_DIR="$HOME/Templates"
+XDG_PUBLICSHARE_DIR="$HOME/Public"
+XDG_DOCUMENTS_DIR="$HOME/Documents"
+XDG_MUSIC_DIR="$HOME/Music"
+XDG_PICTURES_DIR="$HOME/Pictures"
+XDG_VIDEOS_DIR="$HOME/Videos"
diff --git a/testing/docker/tester/dot-config/user-dirs.locale b/testing/docker/tester/dot-config/user-dirs.locale
new file mode 100644
index 000000000..7741b83a3
--- /dev/null
+++ b/testing/docker/tester/dot-config/user-dirs.locale
@@ -0,0 +1 @@
+en_US
diff --git a/testing/docker/tester/dot-pulse/default.pa b/testing/docker/tester/dot-pulse/default.pa
new file mode 100644
index 000000000..39bb44aa7
--- /dev/null
+++ b/testing/docker/tester/dot-pulse/default.pa
@@ -0,0 +1,164 @@
+#!/usr/bin/pulseaudio -nF
+#
+# This file is part of PulseAudio.
+#
+# PulseAudio is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# PulseAudio is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with PulseAudio; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+# This startup script is used only if PulseAudio is started per-user
+# (i.e. not in system mode)
+
+.nofail
+
+### Load something into the sample cache
+#load-sample-lazy x11-bell /usr/share/sounds/gtk-events/activate.wav
+#load-sample-lazy pulse-hotplug /usr/share/sounds/startup3.wav
+#load-sample-lazy pulse-coldplug /usr/share/sounds/startup3.wav
+#load-sample-lazy pulse-access /usr/share/sounds/generic.wav
+
+.fail
+
+### Automatically restore the volume of streams and devices
+load-module module-device-restore
+load-module module-stream-restore
+load-module module-card-restore
+
+### Automatically augment property information from .desktop files
+### stored in /usr/share/application
+load-module module-augment-properties
+
+### Load audio drivers statically
+### (it's probably better to not load these drivers manually, but instead
+### use module-udev-detect -- see below -- for doing this automatically)
+#load-module module-alsa-sink
+#load-module module-alsa-source device=hw:1,0
+#load-module module-oss device="/dev/dsp" sink_name=output source_name=input
+#load-module module-oss-mmap device="/dev/dsp" sink_name=output source_name=input
+#load-module module-null-sink
+#load-module module-pipe-sink
+
+### Automatically load driver modules depending on the hardware available
+.ifexists module-udev-detect.so
+load-module module-udev-detect
+.else
+### Use the static hardware detection module (for systems that lack udev/hal support)
+load-module module-detect
+.endif
+
+### Automatically connect sink and source if JACK server is present
+.ifexists module-jackdbus-detect.so
+.nofail
+load-module module-jackdbus-detect
+.fail
+.endif
+
+### Automatically load driver modules for Bluetooth hardware
+# This module causes a pulseaudio startup failure on "gecko-tester"
+#.ifexists module-bluetooth-discover.so
+#load-module module-bluetooth-discover
+#.endif
+
+### Load several protocols
+.ifexists module-esound-protocol-unix.so
+load-module module-esound-protocol-unix
+.endif
+load-module module-native-protocol-unix
+
+### Network access (may be configured with paprefs, so leave this commented
+### here if you plan to use paprefs)
+#load-module module-esound-protocol-tcp
+#load-module module-native-protocol-tcp
+#load-module module-zeroconf-publish
+
+### Load the RTP receiver module (also configured via paprefs, see above)
+#load-module module-rtp-recv
+
+### Load the RTP sender module (also configured via paprefs, see above)
+#load-module module-null-sink sink_name=rtp format=s16be channels=2 rate=44100 sink_properties="device.description='RTP Multicast Sink'"
+#load-module module-rtp-send source=rtp.monitor
+
+### Load additional modules from GConf settings. This can be configured with the paprefs tool.
+### Please keep in mind that the modules configured by paprefs might conflict with manually
+### loaded modules.
+.ifexists module-gconf.so
+.nofail
+load-module module-gconf
+.fail
+.endif
+
+### Automatically restore the default sink/source when changed by the user
+### during runtime
+### NOTE: This should be loaded as early as possible so that subsequent modules
+### that look up the default sink/source get the right value
+load-module module-default-device-restore
+
+### Automatically move streams to the default sink if the sink they are
+### connected to dies, similar for sources
+load-module module-rescue-streams
+
+### Make sure we always have a sink around, even if it is a null sink.
+load-module module-always-sink
+
+### Honour intended role device property
+load-module module-intended-roles
+
+### Automatically suspend sinks/sources that become idle for too long
+load-module module-suspend-on-idle
+
+### If autoexit on idle is enabled we want to make sure we only quit
+### when no local session needs us anymore.
+# This module causes a pulseaudio startup failure on "gecko-tester"
+#.ifexists module-console-kit.so
+#load-module module-console-kit
+#.endif
+
+### Enable positioned event sounds
+load-module module-position-event-sounds
+
+### Cork music streams when a phone stream is active
+#load-module module-cork-music-on-phone
+
+### Modules to allow autoloading of filters (such as echo cancellation)
+### on demand. module-filter-heuristics tries to determine what filters
+### make sense, and module-filter-apply does the heavy-lifting of
+### loading modules and rerouting streams.
+load-module module-filter-heuristics
+load-module module-filter-apply
+
+### Load DBus protocol
+#.ifexists module-dbus-protocol.so
+#load-module module-dbus-protocol
+#.endif
+
+# X11 modules should not be started from default.pa so that one daemon
+# can be shared by multiple sessions.
+
+### Load X11 bell module
+#load-module module-x11-bell sample=bell-windowing-system
+
+### Register ourselves in the X11 session manager
+#load-module module-x11-xsmp
+
+### Publish connection data in the X11 root window
+#.ifexists module-x11-publish.so
+#.nofail
+#load-module module-x11-publish
+#.fail
+#.endif
+
+load-module module-switch-on-port-available
+
+### Make some devices default
+#set-default-sink output
+#set-default-source input
diff --git a/testing/docker/tester/tc-vcs-config.yml b/testing/docker/tester/tc-vcs-config.yml
new file mode 100644
index 000000000..25e13ee40
--- /dev/null
+++ b/testing/docker/tester/tc-vcs-config.yml
@@ -0,0 +1,40 @@
+# Default configuration used by the tc-vcs tools; these can be overridden by
+# passing the config you wish to use on the command line...
+git: git
+hg: hg
+
+repoCache:
+ # Repo url to clone when running repo init..
+ repoUrl: https://gerrit.googlesource.com/git-repo.git
+ # Version of repo to utilize...
+ repoRevision: master
+ # The root where all downloaded cache files are stored on the local machine...
+ cacheDir: '{{env.HOME}}/.tc-vcs-repo/'
+  # Name/prefix used as part of the base url.
+ cacheName: sources/{{name}}.tar.gz
+ # Command used to upload the tarball
+ uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'"
+  # Large http get requests are often slower using node's built-in http layer, so
+  # we use a subprocess that is responsible for fetching...
+ get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}}
+ # Used to create clone tarball
+ compress: tar -czf {{dest}} {{source}}
+ # All cache urls use tar + gz this is the command used to extract those files
+ # downloaded by the "get" command.
+ extract: tar -x -z -C {{dest}} -f {{source}}
+
+cloneCache:
+ # The root where all downloaded cache files are stored on the local machine...
+ cacheDir: '{{env.HOME}}/.tc-vcs/'
+ # Command used to upload the tarball
+ uploadTar: "curl --header 'Content-Type: application/x-tar' --header 'Content-Encoding: gzip' -X PUT --data-binary @'{{source}}' '{{url}}'"
+  # Large http get requests are often slower using node's built-in http layer, so
+  # we use a subprocess that is responsible for fetching...
+ get: curl --connect-timeout 30 --speed-limit 500000 -L -o {{dest}} {{url}}
+ # Used to create clone tarball
+ compress: tar -czf {{dest}} {{source}}
+ # All cache urls use tar + gz this is the command used to extract those files
+ # downloaded by the "get" command.
+ extract: tar -x -z --strip-components 1 -C {{dest}} -f {{source}}
+  # Name/prefix used as part of the base url.
+ cacheName: clones/{{name}}.tar.gz
diff --git a/testing/docker/tester/tester.env b/testing/docker/tester/tester.env
new file mode 100644
index 000000000..1bcac6132
--- /dev/null
+++ b/testing/docker/tester/tester.env
@@ -0,0 +1,4 @@
+GAIA_REV=tip
+GAIA_REF=tip
+GAIA_BASE_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
+GAIA_HEAD_REPOSITORY=https://hg.mozilla.org/integration/gaia-central
diff --git a/testing/docker/upload-symbols/Dockerfile b/testing/docker/upload-symbols/Dockerfile
new file mode 100644
index 000000000..281995271
--- /dev/null
+++ b/testing/docker/upload-symbols/Dockerfile
@@ -0,0 +1,21 @@
+FROM ubuntu:14.04
+MAINTAINER Anthony Miyaguchi <amiyaguchi@mozilla.com>
+
+WORKDIR /tmp
+
+# Add the upload script
+ADD bin /tmp/bin
+RUN chmod +x /tmp/bin/*
+
+# Define the environmental variables for the scripts
+COPY socorro_token /tmp/
+ENV SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE /tmp/socorro_token
+ENV SCRIPT_PATH toolkit/crashreporter/tools/upload_symbols.py
+
+# Install dependencies for the script
+RUN apt-get update
+RUN apt-get install -y python python-pip wget
+RUN pip install redo requests
+
+# Default command
+CMD ["/bin/bash", "--login"]
diff --git a/testing/docker/upload-symbols/README.md b/testing/docker/upload-symbols/README.md
new file mode 100644
index 000000000..20e29fb3f
--- /dev/null
+++ b/testing/docker/upload-symbols/README.md
@@ -0,0 +1,28 @@
+# Upload Symbols
+Docker worker to upload crashreporter symbols as a separate taskcluster task.
+
+## Building
+`$ docker build -t upload_symbols .`
+
+`$ docker run -i -t upload_symbols`
+
+Then from inside the container, run:
+
+`$ ./bin/upload.sh`
+
+In order to run the `upload_symbols.py` script properly, the Dockerfile expects a text file named `socorro_token`, containing the API token, to be present in the root directory before the image is built.
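+
+For example, with a placeholder token value:
+
+`$ echo "0123456789abcdef" > socorro_token`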
+
+The following environment variables must be set for a successful run:
+- `ARTIFACT_TASKID` : TaskId of the parent build task
+- `GECKO_HEAD_REPOSITORY` : The head repository to download the checkout script
+- `GECKO_HEAD_REV` : Revision of the head repository to look for
+
+## Example
+The container can be run much as it is in production with the following command:
+```
+docker run -ti \
+-e ARTIFACT_TASKID=Hh5vLCaTRRO8Ql9X6XBdxg \
+-e GECKO_HEAD_REV=beed30cce69bc9783d417d3d29ce2c44989961ed \
+-e GECKO_HEAD_REPOSITORY=https://hg.mozilla.org/try/ \
+upload_symbols /bin/bash bin/upload.sh
+```
diff --git a/testing/docker/upload-symbols/bin/checkout-script.sh b/testing/docker/upload-symbols/bin/checkout-script.sh
new file mode 100755
index 000000000..c39778937
--- /dev/null
+++ b/testing/docker/upload-symbols/bin/checkout-script.sh
@@ -0,0 +1,16 @@
+#! /bin/bash -vex
+
+set -x -e
+
+# Inputs, with defaults
+
+: GECKO_HEAD_REPOSITORY ${GECKO_HEAD_REPOSITORY:=https://hg.mozilla.org/mozilla-central}
+: GECKO_HEAD_REV ${GECKO_HEAD_REV:=default}
+
+: SCRIPT_DOWNLOAD_PATH ${SCRIPT_DOWNLOAD_PATH:=$PWD}
+: SCRIPT_PATH ${SCRIPT_PATH:?"script path must be set"}
+set -v
+
+# download script from the gecko repository
+url=${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/${SCRIPT_PATH}
+wget --directory-prefix=${SCRIPT_DOWNLOAD_PATH} $url
diff --git a/testing/docker/upload-symbols/bin/upload.sh b/testing/docker/upload-symbols/bin/upload.sh
new file mode 100755
index 000000000..a0cc15c5b
--- /dev/null
+++ b/testing/docker/upload-symbols/bin/upload.sh
@@ -0,0 +1,21 @@
+#! /bin/bash
+
+set -e
+
+# checkout the script
+source $(dirname $0)/checkout-script.sh
+
+# Check that we have a task id whose artifacts we can fetch
+if [ -z "${ARTIFACT_TASKID}" ]; then
+    echo "Please set ARTIFACT_TASKID. Exiting"
+    exit 1
+fi
+
+# grab the symbols from an arbitrary task
+symbol_url=https://queue.taskcluster.net/v1/task/${ARTIFACT_TASKID}/artifacts/public/build/target.crashreporter-symbols-full.zip
+wget ${symbol_url}
+
+# run
+symbol_zip=$(basename ${symbol_url})
+script_name=$(basename ${SCRIPT_PATH})
+python -u ${script_name} ${symbol_zip}
diff --git a/testing/docker/upload-symbols/test_exports.sh b/testing/docker/upload-symbols/test_exports.sh
new file mode 100755
index 000000000..acc0eb536
--- /dev/null
+++ b/testing/docker/upload-symbols/test_exports.sh
@@ -0,0 +1,6 @@
+#! /bin/bash
+export SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE=./socorro_token
+export ARTIFACT_TASKID=Hh5vLCaTRRO8Ql9X6XBdxg
+export GECKO_HEAD_REV=beed30cce69bc9783d417d3d29ce2c44989961ed
+export GECKO_HEAD_REPOSITORY=https://hg.mozilla.org/try/
+export SCRIPT_PATH=toolkit/crashreporter/tools/upload_symbols.py