path: root/testing/docker/recipes
author    Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit    5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree      10027f336435511475e392454359edea8e25895d /testing/docker/recipes
parent    49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'testing/docker/recipes')
-rw-r--r--  testing/docker/recipes/centos6-build-system-setup.sh   |   11
-rw-r--r--  testing/docker/recipes/common.sh                       |   10
-rw-r--r--  testing/docker/recipes/install-mercurial.sh            |  162
-rwxr-xr-x  testing/docker/recipes/run-task                        |  324
-rwxr-xr-x  testing/docker/recipes/tooltool.py                     | 1022
-rw-r--r--  testing/docker/recipes/ubuntu1204-test-system-setup.sh |  279
-rw-r--r--  testing/docker/recipes/ubuntu1604-test-system-setup.sh |  180
-rw-r--r--  testing/docker/recipes/xvfb.sh                         |   75
8 files changed, 2063 insertions, 0 deletions
diff --git a/testing/docker/recipes/centos6-build-system-setup.sh b/testing/docker/recipes/centos6-build-system-setup.sh
new file mode 100644
index 000000000..bf1d2c78a
--- /dev/null
+++ b/testing/docker/recipes/centos6-build-system-setup.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+cd /setup
+
+. /setup/common.sh
+. /setup/install-mercurial.sh
+
+rm -rf /setup
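centos6-build-system-setup.sh is a thin driver: it runs from /setup, sources the shared helpers, and then deletes the staging directory. A minimal sketch of how an image build might stage and invoke it (the copy step and source path are assumptions, not part of this commit; only /setup matches the script):

    # hypothetical image-build usage; /setup is the path the script expects
    cp -r testing/docker/recipes /setup
    /setup/centos6-build-system-setup.sh  # sources common.sh and install-mercurial.sh, then removes /setup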
diff --git a/testing/docker/recipes/common.sh b/testing/docker/recipes/common.sh
new file mode 100644
index 000000000..ca3fc6996
--- /dev/null
+++ b/testing/docker/recipes/common.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+tooltool_fetch() {
+ cat >manifest.tt
+ python /setup/tooltool.py fetch
+ rm manifest.tt
+}
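tooltool_fetch copies its stdin to manifest.tt, runs the fetch against that manifest, and removes it. A minimal sketch of calling it with a heredoc (size and digest are placeholders, not real values):

    # hypothetical call; any valid manifest entries may go in the JSON body
    tooltool_fetch <<'EOF'
    [
    {
      "size": 12345,
      "digest": "<sha512 hex digest>",
      "algorithm": "sha512",
      "filename": "example.tar.gz"
    }
    ]
    EOF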
diff --git a/testing/docker/recipes/install-mercurial.sh b/testing/docker/recipes/install-mercurial.sh
new file mode 100644
index 000000000..6311a6f53
--- /dev/null
+++ b/testing/docker/recipes/install-mercurial.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This script installs and configures Mercurial.
+
+set -e
+
+# Detect OS.
+if [ -f /etc/lsb-release ]; then
+ . /etc/lsb-release
+
+ if [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "16.04" ]; then
+ HG_DEB=1
+ HG_DIGEST=e891b46d8e97cb1c6b0c714e037ea78ae3043f49d27655332c615c861ebb94654a064298c7363d318edd7750c45574cc434848ae758adbcd2a41c6c390006053
+ HG_SIZE=159870
+ HG_FILENAME=mercurial_4.1.2_amd64.deb
+
+ HG_COMMON_DIGEST=112fab48805f267343c5757af5633ef51e4a8fcc7029b83afb7790ba9600ec185d4857dd1925c9aa724bc191f5f37039a59900b99f95e3427bf5d82c85447b69
+ HG_COMMON_SIZE=1919078
+ HG_COMMON_FILENAME=mercurial-common_4.1.2_all.deb
+ elif [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "12.04" ]; then
+ HG_DEB=1
+ HG_DIGEST=67823aa455c59dbdc24ec1f044b0afdb5c03520ef3601509cb5466dc0ac332846caf96176f07de501c568236f6909e55dfc8f4b02f8c69fa593a4abca9abfeb8
+ HG_SIZE=167880
+ HG_FILENAME=mercurial_4.1.2_amd64.deb
+
+ HG_COMMON_DIGEST=5e1c462a9b699d2068f7a0c14589f347ca719c216181ef7a625033df757185eeb3a8fed57986829a7943f16af5a8d66ddf457cc7fc4af557be88eb09486fe665
+ HG_COMMON_SIZE=3091596
+ HG_COMMON_FILENAME=mercurial-common_4.1.2_all.deb
+ fi
+
+ CERT_PATH=/etc/ssl/certs/ca-certificates.crt
+
+elif [ -f /etc/centos-release ]; then
+ CENTOS_VERSION=`rpm -q --queryformat '%{VERSION}' centos-release`
+ if [ "${CENTOS_VERSION}" = "6" ]; then
+ if [ -f /usr/bin/pip2.7 ]; then
+ PIP_PATH=/usr/bin/pip2.7
+ else
+ # The following RPM is "linked" against Python 2.6, which doesn't
+ # support TLS 1.2. Given the security implications of an insecure
+ # version control tool, we choose to prefer a Mercurial built using
+ # Python 2.7 that supports TLS 1.2. Before you uncomment the code
+ # below, think long and hard about the implications of limiting
+ # Mercurial to TLS 1.0.
+ #HG_RPM=1
+ #HG_DIGEST=c64e00c74402cd9c4ef9792177354fa6ff9c8103f41358f0eab2b15dba900d47d04ea582c6c6ebb80cf52495a28433987ffb67a5f39cd843b6638e3fa46921c8
+ #HG_SIZE=4437360
+ #HG_FILENAME=mercurial-4.1.2.x86_64.rpm
+ echo "We currently require Python 2.7 and /usr/bin/pip2.7 to run Mercurial"
+ exit 1
+ fi
+ else
+ echo "Unsupported CentOS version: ${CENTOS_VERSION}"
+ exit 1
+ fi
+
+ CERT_PATH=/etc/ssl/certs/ca-bundle.crt
+fi
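The Ubuntu branch keys off DISTRIB_ID and DISTRIB_RELEASE, which are shell assignments sourced from /etc/lsb-release. For reference, that file looks roughly like this on a 16.04 image (values illustrative):

    DISTRIB_ID=Ubuntu
    DISTRIB_RELEASE=16.04
    DISTRIB_CODENAME=xenial
    DISTRIB_DESCRIPTION="Ubuntu 16.04 LTS"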
+
+if [ -n "${HG_DEB}" ]; then
+tooltool_fetch <<EOF
+[
+{
+ "size": ${HG_SIZE},
+ "digest": "${HG_DIGEST}",
+ "algorithm": "sha512",
+ "filename": "${HG_FILENAME}"
+},
+{
+ "size": ${HG_COMMON_SIZE},
+ "digest": "${HG_COMMON_DIGEST}",
+ "algorithm": "sha512",
+ "filename": "${HG_COMMON_FILENAME}"
+}
+]
+EOF
+
+ dpkg -i ${HG_COMMON_FILENAME} ${HG_FILENAME}
+elif [ -n "${HG_RPM}" ]; then
+tooltool_fetch <<EOF
+[
+{
+ "size": ${HG_SIZE},
+ "digest": "${HG_DIGEST}",
+ "algorithm": "sha512",
+ "filename": "${HG_FILENAME}"
+}
+]
+EOF
+
+ rpm -i ${HG_FILENAME}
+elif [ -n "${PIP_PATH}" ]; then
+tooltool_fetch <<EOF
+[
+{
+"size": 5133417,
+"visibility": "public",
+"digest": "32b59d23d6b911b7a7e9c9c7659457daf2eba771d5170ad5a44a068d7941939e1d68c72c847e488bf26c14392e5d7ee25e5f660e0330250d0685acce40552745",
+"algorithm": "sha512",
+"filename": "mercurial-4.1.2.tar.gz"
+}
+]
+EOF
+
+ ${PIP_PATH} install mercurial-4.1.2.tar.gz
+else
+ echo "Do not know how to install Mercurial on this OS"
+ exit 1
+fi
+
+chmod 644 /usr/local/mercurial/robustcheckout.py
+
+mkdir -p /etc/mercurial
+cat >/etc/mercurial/hgrc <<EOF
+# By default the progress bar starts after 3s and updates every 0.1s. We
+# change this so it shows and updates every 1.0s.
+# We also tell progress to assume a TTY is present so updates are printed
+# even if there is no known TTY.
+[progress]
+delay = 1.0
+refresh = 1.0
+assume-tty = true
+
+[web]
+cacerts = ${CERT_PATH}
+
+[extensions]
+robustcheckout = /usr/local/mercurial/robustcheckout.py
+
+[hostsecurity]
+# When running a modern Python, Mercurial will default to TLS 1.1+.
+# When running on a legacy Python, Mercurial will default to TLS 1.0+.
+# There is no good reason we shouldn't be running a modern Python
+# capable of speaking TLS 1.2. And the only Mercurial servers we care
+# about should be running TLS 1.2. So make TLS 1.2 the minimum.
+minimumprotocol = tls1.2
+
+# Settings to make 1-click loaners more useful.
+[extensions]
+color =
+histedit =
+pager =
+rebase =
+
+[diff]
+git = 1
+showfunc = 1
+
+[pager]
+pager = LESS=FRSXQ less
+
+attend-help = true
+attend-incoming = true
+attend-log = true
+attend-outgoing = true
+attend-status = true
+EOF
+
+chmod 644 /etc/mercurial/hgrc
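Once the script has run, the result can be sanity-checked from a shell in the image; these checks are illustrative, not part of the recipe:

    hg --version                             # confirms Mercurial is on PATH
    hg config extensions.robustcheckout      # /usr/local/mercurial/robustcheckout.py
    hg config hostsecurity.minimumprotocol   # tls1.2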
diff --git a/testing/docker/recipes/run-task b/testing/docker/recipes/run-task
new file mode 100755
index 000000000..978683cb5
--- /dev/null
+++ b/testing/docker/recipes/run-task
@@ -0,0 +1,324 @@
+#!/usr/bin/python2.7 -u
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Run a task after performing common actions.
+
+This script is meant to be the "driver" for TaskCluster based tasks.
+It receives some common arguments to control the run-time environment.
+
+It performs actions as requested from the arguments. Then it executes
+the requested process and prints its output, prefixing it with the
+current time to improve log usefulness.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import datetime
+import errno
+import grp
+import json
+import os
+import pwd
+import re
+import socket
+import stat
+import subprocess
+import sys
+import urllib2
+
+
+FINGERPRINT_URL = 'http://taskcluster/secrets/v1/secret/project/taskcluster/gecko/hgfingerprint'
+FALLBACK_FINGERPRINT = {
+ 'fingerprints':
+ "sha256:8e:ad:f7:6a:eb:44:06:15:ed:f3:e4:69:a6:64:60:37:2d:ff:98:88:37"
+ ":bf:d7:b8:40:84:01:48:9c:26:ce:d9"}
+
+
+def print_line(prefix, m):
+ now = datetime.datetime.utcnow()
+ print(b'[%s %sZ] %s' % (prefix, now.isoformat(), m), end=b'')
+
+
+def run_and_prefix_output(prefix, args, extra_env=None):
+ """Runs a process and prefixes its output with the time.
+
+ Returns the process exit code.
+ """
+ print_line(prefix, b'executing %s\n' % args)
+
+ env = dict(os.environ)
+ env.update(extra_env or {})
+
+ # Note: TaskCluster's stdin is a TTY. This attribute is lost
+ # when we pass sys.stdin to the invoked process. If we cared
+ # to preserve stdin as a TTY, we could make this work. But until
+ # someone needs it, don't bother.
+ p = subprocess.Popen(args,
+ # Disable buffering because we want to receive output
+ # as it is generated so timestamps in logs are
+ # accurate.
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ stdin=sys.stdin.fileno(),
+ cwd='/',
+ env=env,
+ # So \r in progress bars are rendered as multiple
+ # lines, preserving progress indicators.
+ universal_newlines=True)
+
+ while True:
+ data = p.stdout.readline()
+ if data == b'':
+ break
+
+ print_line(prefix, data)
+
+ return p.wait()
+
+
+def vcs_checkout(source_repo, dest, store_path,
+ base_repo=None, revision=None, branch=None):
+ # Specify method to checkout a revision. This defaults to revisions as
+ # SHA-1 strings, but also supports symbolic revisions like `tip` via the
+ # branch flag.
+ if revision:
+ revision_flag = b'--revision'
+ revision_value = revision
+ elif branch:
+ revision_flag = b'--branch'
+ revision_value = branch
+ else:
+ print('revision is not specified for checkout')
+ sys.exit(1)
+
+ # Obtain certificate fingerprints.
+ try:
+ print_line(b'vcs', 'fetching hg.mozilla.org fingerprint from %s\n' %
+ FINGERPRINT_URL)
+ res = urllib2.urlopen(FINGERPRINT_URL, timeout=10)
+ secret = res.read()
+ try:
+ secret = json.loads(secret, encoding='utf-8')
+ except ValueError:
+ print_line(b'vcs', 'invalid JSON in hg fingerprint secret')
+ sys.exit(1)
+ except (urllib2.URLError, socket.timeout):
+ print_line(b'vcs', 'Unable to retrieve current hg.mozilla.org fingerprint '
+ 'using the secret service, using fallback instead.')
+ # XXX This fingerprint will not be accurate if running on an old
+ # revision after the server fingerprint has changed.
+ secret = {'secret': FALLBACK_FINGERPRINT}
+
+ hgmo_fingerprint = secret['secret']['fingerprints'].encode('ascii')
+
+ args = [
+ b'/usr/bin/hg',
+ b'--config', b'hostsecurity.hg.mozilla.org:fingerprints=%s' % hgmo_fingerprint,
+ b'robustcheckout',
+ b'--sharebase', store_path,
+ b'--purge',
+ ]
+
+ if base_repo:
+ args.extend([b'--upstream', base_repo])
+
+ args.extend([
+ revision_flag, revision_value,
+ source_repo, dest,
+ ])
+
+ res = run_and_prefix_output(b'vcs', args,
+ extra_env={b'PYTHONUNBUFFERED': b'1'})
+ if res:
+ sys.exit(res)
+
+ # Update the current revision hash and ensure that it is well formed.
+ revision = subprocess.check_output(
+ [b'/usr/bin/hg', b'log',
+ b'--rev', b'.',
+ b'--template', b'{node}'],
+ cwd=dest)
+
+ assert re.match('^[a-f0-9]{40}$', revision)
+ return revision
+
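For a concrete sense of what vcs_checkout assembles, the hg command it ends up running looks roughly like this (fingerprint, revision, and paths are illustrative placeholders):

    /usr/bin/hg \
      --config 'hostsecurity.hg.mozilla.org:fingerprints=sha256:8e:ad:...' \
      robustcheckout --sharebase /home/worker/hg-store --purge \
      --upstream https://hg.mozilla.org/mozilla-unified \
      --revision <40-char hex rev> \
      https://hg.mozilla.org/mozilla-central /home/worker/checkouts/gecko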
+
+def main(args):
+ print_line(b'setup', b'run-task started\n')
+
+ if os.getuid() != 0:
+ print('assertion failed: not running as root')
+ return 1
+
+ # Arguments up to '--' are ours. After are for the main task
+ # to be executed.
+ try:
+ i = args.index('--')
+ our_args = args[0:i]
+ task_args = args[i + 1:]
+ except ValueError:
+ our_args = args
+ task_args = []
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--user', default='worker', help='user to run as')
+ parser.add_argument('--group', default='worker', help='group to run as')
+ # We allow paths to be chowned by the --user:--group before permissions are
+ # dropped. This is often necessary for caches/volumes, since they default
+ # to root:root ownership.
+ parser.add_argument('--chown', action='append',
+ help='Directory to chown to --user:--group')
+ parser.add_argument('--chown-recursive', action='append',
+ help='Directory to recursively chown to --user:--group')
+ parser.add_argument('--vcs-checkout',
+ help='Directory where Gecko checkout should be created')
+ parser.add_argument('--tools-checkout',
+ help='Directory where build/tools checkout should be created')
+
+ args = parser.parse_args(our_args)
+
+ try:
+ user = pwd.getpwnam(args.user)
+ except KeyError:
+ print('could not find user %s; specify --user to a known user' %
+ args.user)
+ return 1
+ try:
+ group = grp.getgrnam(args.group)
+ except KeyError:
+ print('could not find group %s; specify --group to a known group' %
+ args.group)
+ return 1
+
+ uid = user.pw_uid
+ gid = group.gr_gid
+
+ # Find all groups of which this user is a member.
+ gids = [g.gr_gid for g in grp.getgrall() if args.group in g.gr_mem]
+
+ wanted_dir_mode = stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
+
+ def set_dir_permissions(path, uid, gid):
+ st = os.lstat(path)
+
+ if st.st_uid != uid or st.st_gid != gid:
+ os.chown(path, uid, gid)
+
+ # Also make sure dirs are writable in case we need to delete
+ # them.
+ if st.st_mode & wanted_dir_mode != wanted_dir_mode:
+ os.chmod(path, st.st_mode | wanted_dir_mode)
+
+ # Change ownership of requested paths.
+ # FUTURE: parse argument values for user/group if we don't want to
+ # use --user/--group.
+ for path in args.chown or []:
+ print_line(b'chown', b'changing ownership of %s to %s:%s\n' % (
+ path, user.pw_name, group.gr_name))
+ set_dir_permissions(path, uid, gid)
+
+ for path in args.chown_recursive or []:
+ print_line(b'chown', b'recursively changing ownership of %s to %s:%s\n' %
+ (path, user.pw_name, group.gr_name))
+
+ set_dir_permissions(path, uid, gid)
+
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ set_dir_permissions(os.path.join(root, d), uid, gid)
+
+ for f in files:
+ # File may be a symlink that points to nowhere. In which case
+ # os.chown() would fail because it attempts to follow the
+ # symlink. We only care about directory entries, not what
+ # they point to. So setting the owner of the symlink should
+ # be sufficient.
+ os.lchown(os.path.join(root, f), uid, gid)
+
+ def prepare_checkout_dir(checkout):
+ if not checkout:
+ return
+
+ # Ensure the directory for the source checkout exists.
+ try:
+ os.makedirs(os.path.dirname(checkout))
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # And that it is owned by the appropriate user/group.
+ os.chown(os.path.dirname(checkout), uid, gid)
+
+ # And ensure the shared store path exists and has proper permissions.
+ if 'HG_STORE_PATH' not in os.environ:
+ print('error: HG_STORE_PATH environment variable not set')
+ sys.exit(1)
+
+ store_path = os.environ['HG_STORE_PATH']
+ try:
+ os.makedirs(store_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ os.chown(store_path, uid, gid)
+
+ prepare_checkout_dir(args.vcs_checkout)
+ prepare_checkout_dir(args.tools_checkout)
+
+ # Drop permissions to requested user.
+ # This code is modeled after what `sudo` was observed to do in a Docker
+ # container. We do not bother calling setrlimit() because containers have
+ # their own limits.
+ print_line(b'setup', b'running as %s:%s\n' % (args.user, args.group))
+ os.setgroups(gids)
+ os.umask(022)
+ os.setresgid(gid, gid, gid)
+ os.setresuid(uid, uid, uid)
+
+ # Check out the repository, setting GECKO_HEAD_REV to the current
+ # revision hash. Revision hashes have priority over symbolic revisions. We
+ # disallow running tasks with symbolic revisions unless they have been
+ # resolved by a checkout.
+ if args.vcs_checkout:
+ base_repo = os.environ.get('GECKO_BASE_REPOSITORY')
+ # Some callers set the base repository to mozilla-central for historical
+ # reasons. Switch to mozilla-unified because robustcheckout works best
+ # with it.
+ if base_repo == 'https://hg.mozilla.org/mozilla-central':
+ base_repo = b'https://hg.mozilla.org/mozilla-unified'
+
+ os.environ['GECKO_HEAD_REV'] = vcs_checkout(
+ os.environ['GECKO_HEAD_REPOSITORY'],
+ args.vcs_checkout,
+ os.environ['HG_STORE_PATH'],
+ base_repo=base_repo,
+ revision=os.environ.get('GECKO_HEAD_REV'),
+ branch=os.environ.get('GECKO_HEAD_REF'))
+
+ elif not os.environ.get('GECKO_HEAD_REV') and \
+ os.environ.get('GECKO_HEAD_REF'):
+ print('task should be defined in terms of non-symbolic revision')
+ return 1
+
+ if args.tools_checkout:
+ vcs_checkout(b'https://hg.mozilla.org/build/tools',
+ args.tools_checkout,
+ os.environ['HG_STORE_PATH'],
+ # Always check out the latest commit on default branch.
+ # This is non-deterministic!
+ branch=b'default')
+
+ return run_and_prefix_output(b'task', task_args)
+
+
+if __name__ == '__main__':
+ # Unbuffer stdio.
+ sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
+ sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
+
+ sys.exit(main(sys.argv[1:]))
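Putting it together, a task typically invokes run-task as root with ownership and checkout flags before the '--' separator and the real task command after it. A hypothetical invocation (all environment values, paths, and the task command are placeholders):

    export GECKO_HEAD_REPOSITORY=https://hg.mozilla.org/mozilla-central
    export GECKO_HEAD_REV=<40-char hex rev>
    export HG_STORE_PATH=/home/worker/hg-store
    ./run-task --user worker --group worker \
        --chown-recursive /home/worker/workspace \
        --vcs-checkout /home/worker/checkouts/gecko \
        -- /bin/bash -c 'cd /home/worker/checkouts/gecko && ./mach build'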
diff --git a/testing/docker/recipes/tooltool.py b/testing/docker/recipes/tooltool.py
new file mode 100755
index 000000000..952f9a5a7
--- /dev/null
+++ b/testing/docker/recipes/tooltool.py
@@ -0,0 +1,1022 @@
+#!/usr/bin/env python
+
+# tooltool is a lookaside cache implemented in Python
+# Copyright (C) 2011 John H. Ford <john@johnford.info>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation version 2
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# A manifest file lists files, stored elsewhere in the lookaside cache,
+# that belong in the directory containing the manifest. It should only
+# list files in the same directory in which the manifest file resides,
+# and it should be called 'manifest.tt'.
+
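A manifest is a JSON list of file records; "unpack", "visibility", and "setup" are optional keys. A minimal manifest.tt, written here via a shell heredoc (all values illustrative):

    cat > manifest.tt <<'EOF'
    [
    {
      "size": 12345,
      "digest": "<sha512 hex digest>",
      "algorithm": "sha512",
      "filename": "example.tar.gz",
      "unpack": true,
      "visibility": "public"
    }
    ]
    EOF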
+import hashlib
+import httplib
+import json
+import logging
+import optparse
+import os
+import shutil
+import sys
+import tarfile
+import tempfile
+import threading
+import time
+import urllib2
+import urlparse
+import zipfile
+
+from subprocess import PIPE
+from subprocess import Popen
+
+__version__ = '1'
+
+DEFAULT_MANIFEST_NAME = 'manifest.tt'
+TOOLTOOL_PACKAGE_SUFFIX = '.TOOLTOOL-PACKAGE'
+
+
+log = logging.getLogger(__name__)
+
+
+class FileRecordJSONEncoderException(Exception):
+ pass
+
+
+class InvalidManifest(Exception):
+ pass
+
+
+class ExceptionWithFilename(Exception):
+
+ def __init__(self, filename):
+ Exception.__init__(self)
+ self.filename = filename
+
+
+class BadFilenameException(ExceptionWithFilename):
+ pass
+
+
+class DigestMismatchException(ExceptionWithFilename):
+ pass
+
+
+class MissingFileException(ExceptionWithFilename):
+ pass
+
+
+class FileRecord(object):
+
+ def __init__(self, filename, size, digest, algorithm, unpack=False,
+ visibility=None, setup=None):
+ object.__init__(self)
+ if '/' in filename or '\\' in filename:
+ log.error(
+ "The filename provided contains path information and is, therefore, invalid.")
+ raise BadFilenameException(filename=filename)
+ self.filename = filename
+ self.size = size
+ self.digest = digest
+ self.algorithm = algorithm
+ self.unpack = unpack
+ self.visibility = visibility
+ self.setup = setup
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if self.filename == other.filename and \
+ self.size == other.size and \
+ self.digest == other.digest and \
+ self.algorithm == other.algorithm and \
+ self.visibility == other.visibility:
+ return True
+ else:
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return repr(self)
+
+ def __repr__(self):
+ return "%s.%s(filename='%s', size=%s, digest='%s', algorithm='%s', visibility=%r)" % (
+ __name__, self.__class__.__name__, self.filename, self.size,
+ self.digest, self.algorithm, self.visibility)
+
+ def present(self):
+ # Doesn't check validity
+ return os.path.exists(self.filename)
+
+ def validate_size(self):
+ if self.present():
+ return self.size == os.path.getsize(self.filename)
+ else:
+ log.debug(
+ "trying to validate size on a missing file, %s", self.filename)
+ raise MissingFileException(filename=self.filename)
+
+ def validate_digest(self):
+ if self.present():
+ with open(self.filename, 'rb') as f:
+ return self.digest == digest_file(f, self.algorithm)
+ else:
+ log.debug(
+ "trying to validate digest on a missing file, %s', self.filename")
+ raise MissingFileException(filename=self.filename)
+
+ def validate(self):
+ if self.validate_size():
+ if self.validate_digest():
+ return True
+ return False
+
+ def describe(self):
+ if self.present() and self.validate():
+ return "'%s' is present and valid" % self.filename
+ elif self.present():
+ return "'%s' is present and invalid" % self.filename
+ else:
+ return "'%s' is absent" % self.filename
+
+
+def create_file_record(filename, algorithm):
+ fo = open(filename, 'rb')
+ stored_filename = os.path.split(filename)[1]
+ fr = FileRecord(stored_filename, os.path.getsize(
+ filename), digest_file(fo, algorithm), algorithm)
+ fo.close()
+ return fr
+
+
+class FileRecordJSONEncoder(json.JSONEncoder):
+
+ def encode_file_record(self, obj):
+ if not issubclass(type(obj), FileRecord):
+ err = "FileRecordJSONEncoder is only for FileRecord and lists of FileRecords, " \
+ "not %s" % obj.__class__.__name__
+ log.warn(err)
+ raise FileRecordJSONEncoderException(err)
+ else:
+ rv = {
+ 'filename': obj.filename,
+ 'size': obj.size,
+ 'algorithm': obj.algorithm,
+ 'digest': obj.digest,
+ }
+ if obj.unpack:
+ rv['unpack'] = True
+ if obj.visibility is not None:
+ rv['visibility'] = obj.visibility
+ if obj.setup:
+ rv['setup'] = obj.setup
+ return rv
+
+ def default(self, f):
+ if issubclass(type(f), list):
+ record_list = []
+ for i in f:
+ record_list.append(self.encode_file_record(i))
+ return record_list
+ else:
+ return self.encode_file_record(f)
+
+
+class FileRecordJSONDecoder(json.JSONDecoder):
+
+ """I help the json module materialize a FileRecord from
+ a JSON file. I understand FileRecords and lists of
+ FileRecords. I ignore things that I don't expect for now"""
+ # TODO: make this more explicit in what it's looking for
+ # and error out on unexpected things
+
+ def process_file_records(self, obj):
+ if isinstance(obj, list):
+ record_list = []
+ for i in obj:
+ record = self.process_file_records(i)
+ if issubclass(type(record), FileRecord):
+ record_list.append(record)
+ return record_list
+ required_fields = [
+ 'filename',
+ 'size',
+ 'algorithm',
+ 'digest',
+ ]
+ if isinstance(obj, dict):
+ missing = False
+ for req in required_fields:
+ if req not in obj:
+ missing = True
+ break
+
+ if not missing:
+ unpack = obj.get('unpack', False)
+ visibility = obj.get('visibility', None)
+ setup = obj.get('setup')
+ rv = FileRecord(
+ obj['filename'], obj['size'], obj['digest'], obj['algorithm'],
+ unpack, visibility, setup)
+ log.debug("materialized %s" % rv)
+ return rv
+ return obj
+
+ def decode(self, s):
+ decoded = json.JSONDecoder.decode(self, s)
+ rv = self.process_file_records(decoded)
+ return rv
+
+
+class Manifest(object):
+
+ valid_formats = ('json',)
+
+ def __init__(self, file_records=None):
+ self.file_records = file_records or []
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if len(self.file_records) != len(other.file_records):
+ log.debug('Manifests differ in number of files')
+ return False
+ # sort the file records by filename before comparing
+ mine = sorted((fr.filename, fr) for fr in self.file_records)
+ theirs = sorted((fr.filename, fr) for fr in other.file_records)
+ return mine == theirs
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __deepcopy__(self, memo):
+ # This is required for a deep copy
+ return Manifest(self.file_records[:])
+
+ def __copy__(self):
+ return Manifest(self.file_records)
+
+ def copy(self):
+ return Manifest(self.file_records[:])
+
+ def present(self):
+ return all(i.present() for i in self.file_records)
+
+ def validate_sizes(self):
+ return all(i.validate_size() for i in self.file_records)
+
+ def validate_digests(self):
+ return all(i.validate_digest() for i in self.file_records)
+
+ def validate(self):
+ return all(i.validate() for i in self.file_records)
+
+ def load(self, data_file, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ try:
+ self.file_records.extend(
+ json.load(data_file, cls=FileRecordJSONDecoder))
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def loads(self, data_string, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ try:
+ self.file_records.extend(
+ json.loads(data_string, cls=FileRecordJSONDecoder))
+ except ValueError:
+ raise InvalidManifest("trying to read invalid manifest file")
+
+ def dump(self, output_file, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ rv = json.dump(
+ self.file_records, output_file, indent=0, cls=FileRecordJSONEncoder,
+ separators=(',', ': '))
+ print >> output_file, ''
+ return rv
+
+ def dumps(self, fmt='json'):
+ assert fmt in self.valid_formats
+ if fmt == 'json':
+ return json.dumps(self.file_records, cls=FileRecordJSONEncoder)
+
+
+def digest_file(f, a):
+ """I take a file like object 'f' and return a hex-string containing
+ of the result of the algorithm 'a' applied to 'f'."""
+ h = hashlib.new(a)
+ chunk_size = 1024 * 10
+ data = f.read(chunk_size)
+ while data:
+ h.update(data)
+ data = f.read(chunk_size)
+ name = repr(f.name) if hasattr(f, 'name') else 'a file'
+ log.debug('hashed %s with %s to be %s', name, a, h.hexdigest())
+ return h.hexdigest()
+
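digest_file streams the file through hashlib in 10 KB chunks, so the result matches what standard tools report. A shell cross-check (filename illustrative):

    sha512sum example.tar.gz   # hex output should equal the manifest's "digest"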
+
+def execute(cmd):
+ """Execute CMD, logging its stdout at the info level"""
+ process = Popen(cmd, shell=True, stdout=PIPE)
+ while True:
+ line = process.stdout.readline()
+ if not line:
+ break
+ log.info(line.replace('\n', ' '))
+ return process.wait() == 0
+
+
+def open_manifest(manifest_file):
+ """I know how to take a filename and load it into a Manifest object"""
+ if os.path.exists(manifest_file):
+ manifest = Manifest()
+ with open(manifest_file, "rb") as f:
+ manifest.load(f)
+ log.debug("loaded manifest from file '%s'" % manifest_file)
+ return manifest
+ else:
+ log.debug("tried to load absent file '%s' as manifest" % manifest_file)
+ raise InvalidManifest(
+ "manifest file '%s' does not exist" % manifest_file)
+
+
+def list_manifest(manifest_file):
+ """I know how print all the files in a location"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error("failed to load manifest file at '%s': %s" % (
+ manifest_file,
+ str(e),
+ ))
+ return False
+ for f in manifest.file_records:
+ print "%s\t%s\t%s" % ("P" if f.present() else "-",
+ "V" if f.present() and f.validate() else "-",
+ f.filename)
+ return True
+
+
+def validate_manifest(manifest_file):
+ """I validate that all files in a manifest are present and valid but
+ don't fetch or delete them if they aren't"""
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error("failed to load manifest file at '%s': %s" % (
+ manifest_file,
+ str(e),
+ ))
+ return False
+ invalid_files = []
+ absent_files = []
+ for f in manifest.file_records:
+ if not f.present():
+ absent_files.append(f)
+ else:
+ if not f.validate():
+ invalid_files.append(f)
+ if len(invalid_files + absent_files) == 0:
+ return True
+ else:
+ return False
+
+
+def add_files(manifest_file, algorithm, filenames, visibility, unpack):
+ # returns True if all files successfully added, False if not
+ # and doesn't catch library Exceptions. If any files are already
+ # tracked in the manifest, return will be False because they weren't
+ # added
+ all_files_added = True
+ # Create an old_manifest object to add to
+ if os.path.exists(manifest_file):
+ old_manifest = open_manifest(manifest_file)
+ else:
+ old_manifest = Manifest()
+ log.debug("creating a new manifest file")
+ new_manifest = Manifest() # use a different manifest for the output
+ for filename in filenames:
+ log.debug("adding %s" % filename)
+ path, name = os.path.split(filename)
+ new_fr = create_file_record(filename, algorithm)
+ new_fr.visibility = visibility
+ new_fr.unpack = unpack
+ log.debug("appending a new file record to manifest file")
+ add = True
+ for fr in old_manifest.file_records:
+ log.debug("manifest file has '%s'" % "', ".join(
+ [x.filename for x in old_manifest.file_records]))
+ if new_fr == fr:
+ log.info("file already in old_manifest")
+ add = False
+ elif filename == fr.filename:
+ log.error("manifest already contains a different file named %s" % filename)
+ add = False
+ if add:
+ new_manifest.file_records.append(new_fr)
+ log.debug("added '%s' to manifest" % filename)
+ else:
+ all_files_added = False
+ # copy any files in the old manifest that aren't in the new one
+ new_filenames = set(fr.filename for fr in new_manifest.file_records)
+ for old_fr in old_manifest.file_records:
+ if old_fr.filename not in new_filenames:
+ new_manifest.file_records.append(old_fr)
+ with open(manifest_file, 'wb') as output:
+ new_manifest.dump(output, fmt='json')
+ return all_files_added
+
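add_files backs the `add` command. Hypothetical invocations (filenames illustrative; a visibility must be set on every record before the manifest can be uploaded):

    python tooltool.py add --visibility public --unpack clang.tar.xz
    python tooltool.py add --visibility internal internal-tool.bin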
+
+def touch(f):
+ """Used to modify mtime in cached files;
+ mtime is used by the purge command"""
+ try:
+ os.utime(f, None)
+ except OSError:
+ log.warn('impossible to update utime of file %s' % f)
+
+
+def fetch_file(base_urls, file_record, grabchunk=1024 * 4, auth_file=None, region=None):
+ # A file that is requested to be fetched and already exists locally will
+ # be overwritten by this function
+ fd, temp_path = tempfile.mkstemp(dir=os.getcwd())
+ os.close(fd)
+ fetched_path = None
+ for base_url in base_urls:
+ # Generate the URL for the file on the server side
+ url = urlparse.urljoin(base_url,
+ '%s/%s' % (file_record.algorithm, file_record.digest))
+ if region is not None:
+ url += '?region=' + region
+
+ log.info("Attempting to fetch from '%s'..." % base_url)
+
+ # Well, the file doesn't exist locally. Let's fetch it.
+ try:
+ req = urllib2.Request(url)
+ _authorize(req, auth_file)
+ f = urllib2.urlopen(req)
+ log.debug("opened %s for reading" % url)
+ with open(temp_path, 'wb') as out:
+ k = True
+ size = 0
+ while k:
+ # TODO: print statistics as file transfers happen both for info and to stop
+ # buildbot timeouts
+ indata = f.read(grabchunk)
+ out.write(indata)
+ size += len(indata)
+ if indata == '':
+ k = False
+ log.info("File %s fetched from %s as %s" %
+ (file_record.filename, base_url, temp_path))
+ fetched_path = temp_path
+ break
+ except (urllib2.URLError, urllib2.HTTPError, ValueError) as e:
+ log.info("...failed to fetch '%s' from %s" %
+ (file_record.filename, base_url))
+ log.debug("%s" % e)
+ except IOError: # pragma: no cover
+ log.info("failed to write to temporary file for '%s'" %
+ file_record.filename, exc_info=True)
+
+ # cleanup temp file in case of issues
+ if fetched_path:
+ return os.path.split(fetched_path)[1]
+ else:
+ try:
+ os.remove(temp_path)
+ except OSError: # pragma: no cover
+ pass
+ return None
+
+
+def clean_path(dirname):
+ """Remove a subtree if is exists. Helper for unpack_file()."""
+ if os.path.exists(dirname):
+ log.info('rm tree: %s' % dirname)
+ shutil.rmtree(dirname)
+
+
+def unpack_file(filename, setup=None):
+ """Untar `filename`, assuming it is uncompressed or compressed with bzip2,
+ xz, gzip, or unzip a zip file. The file is assumed to contain a single
+ directory with a name matching the base of the given filename.
+ Xz support is handled by shelling out to 'tar'."""
+ if tarfile.is_tarfile(filename):
+ tar_file, zip_ext = os.path.splitext(filename)
+ base_file, tar_ext = os.path.splitext(tar_file)
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ tar = tarfile.open(filename)
+ tar.extractall()
+ tar.close()
+ elif filename.endswith('.tar.xz'):
+ base_file = filename.replace('.tar.xz', '')
+ clean_path(base_file)
+ log.info('untarring "%s"' % filename)
+ if not execute('tar -Jxf %s 2>&1' % filename):
+ return False
+ elif zipfile.is_zipfile(filename):
+ base_file = filename.replace('.zip', '')
+ clean_path(base_file)
+ log.info('unzipping "%s"' % filename)
+ z = zipfile.ZipFile(filename)
+ z.extractall()
+ z.close()
+ else:
+ log.error("Unknown archive extension for filename '%s'" % filename)
+ return False
+
+ if setup and not execute(os.path.join(base_file, setup)):
+ return False
+ return True
+
+
+def fetch_files(manifest_file, base_urls, filenames=[], cache_folder=None,
+ auth_file=None, region=None):
+ # Let's load the manifest file
+ try:
+ manifest = open_manifest(manifest_file)
+ except InvalidManifest as e:
+ log.error("failed to load manifest file at '%s': %s" % (
+ manifest_file,
+ str(e),
+ ))
+ return False
+
+ # We want to track files that are already present in the current working
+ # directory AND valid; we will not need to fetch these.
+ present_files = []
+
+ # We want to track files that fail to be fetched as well as
+ # files that are fetched
+ failed_files = []
+ fetched_files = []
+
+ # Files that we want to unpack.
+ unpack_files = []
+
+ # Setup for unpacked files.
+ setup_files = {}
+
+ # Let's go through the manifest and fetch the files that we want
+ for f in manifest.file_records:
+ # case 1: files are already present
+ if f.present():
+ if f.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # we have an invalid file here; better to clean up!
+ # this invalid file needs to be replaced with a good one
+ # from the local cache or fetched from a tooltool server
+ log.info("File %s is present locally but it is invalid, so I will remove it "
+ "and try to fetch it" % f.filename)
+ os.remove(os.path.join(os.getcwd(), f.filename))
+
+ # check if file is already in cache
+ if cache_folder and f.filename not in present_files:
+ try:
+ shutil.copy(os.path.join(cache_folder, f.digest),
+ os.path.join(os.getcwd(), f.filename))
+ log.info("File %s retrieved from local cache %s" %
+ (f.filename, cache_folder))
+ touch(os.path.join(cache_folder, f.digest))
+
+ filerecord_for_validation = FileRecord(
+ f.filename, f.size, f.digest, f.algorithm)
+ if filerecord_for_validation.validate():
+ present_files.append(f.filename)
+ if f.unpack:
+ unpack_files.append(f.filename)
+ else:
+ # the file copied from the cache is invalid, better to
+ # clean up the cache version itself as well
+ log.warn("File %s retrieved from cache is invalid! I am deleting it from the "
+ "cache as well" % f.filename)
+ os.remove(os.path.join(os.getcwd(), f.filename))
+ os.remove(os.path.join(cache_folder, f.digest))
+ except IOError:
+ log.info("File %s not present in local cache folder %s" %
+ (f.filename, cache_folder))
+
+ # now I will try to fetch all files which are not already present and
+ # valid, appending a suffix to avoid race conditions
+ temp_file_name = None
+ # 'filenames' is the list of filenames to be managed; if it is a
+ # non-empty list it is used as a filter. If a filename is in
+ # present_files, we already have it because it was either in the
+ # working dir or in the cache.
+ if (f.filename in filenames or len(filenames) == 0) and f.filename not in present_files:
+ log.debug("fetching %s" % f.filename)
+ temp_file_name = fetch_file(base_urls, f, auth_file=auth_file, region=region)
+ if temp_file_name:
+ fetched_files.append((f, temp_file_name))
+ else:
+ failed_files.append(f.filename)
+ else:
+ log.debug("skipping %s" % f.filename)
+
+ if f.setup:
+ if f.unpack:
+ setup_files[f.filename] = f.setup
+ else:
+ log.error("'setup' requires 'unpack' being set for %s" % f.filename)
+ failed_files.append(f.filename)
+
+ # let's ensure that fetched files match what the manifest specified
+ for localfile, temp_file_name in fetched_files:
+ # since I downloaded to a temp file, I need to perform all validations on the temp file
+ # this is why filerecord_for_validation is created
+
+ filerecord_for_validation = FileRecord(
+ temp_file_name, localfile.size, localfile.digest, localfile.algorithm)
+
+ if filerecord_for_validation.validate():
+ # great!
+ # I can rename the temp file
+ log.info("File integrity verified, renaming %s to %s" %
+ (temp_file_name, localfile.filename))
+ os.rename(os.path.join(os.getcwd(), temp_file_name),
+ os.path.join(os.getcwd(), localfile.filename))
+
+ if localfile.unpack:
+ unpack_files.append(localfile.filename)
+
+ # if I am using a cache and a new file has just been retrieved from a
+ # remote location, I need to update the cache as well
+ if cache_folder:
+ log.info("Updating local cache %s..." % cache_folder)
+ try:
+ if not os.path.exists(cache_folder):
+ log.info("Creating cache in %s..." % cache_folder)
+ os.makedirs(cache_folder, 0700)
+ shutil.copy(os.path.join(os.getcwd(), localfile.filename),
+ os.path.join(cache_folder, localfile.digest))
+ log.info("Local cache %s updated with %s" % (cache_folder,
+ localfile.filename))
+ touch(os.path.join(cache_folder, localfile.digest))
+ except (OSError, IOError):
+ log.warning('Impossible to add file %s to cache folder %s' %
+ (localfile.filename, cache_folder), exc_info=True)
+ else:
+ failed_files.append(localfile.filename)
+ log.error("'%s'" % filerecord_for_validation.describe())
+ os.remove(temp_file_name)
+
+ # Unpack files that need to be unpacked.
+ for filename in unpack_files:
+ if not unpack_file(filename, setup_files.get(filename)):
+ failed_files.append(filename)
+
+ # If we failed to fetch or validate a file, we need to fail
+ if len(failed_files) > 0:
+ log.error("The following files failed: '%s'" %
+ "', ".join(failed_files))
+ return False
+ return True
+
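fetch_files backs the `fetch` command. A typical call fetches every record in manifest.tt, consulting and updating a local cache; naming files on the command line restricts the fetch to those files (paths illustrative):

    python tooltool.py fetch -m manifest.tt -c /home/worker/tooltool-cache
    python tooltool.py fetch -m manifest.tt clang.tar.xz   # fetch one file only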
+
+def freespace(p):
+ "Returns the number of bytes free under directory `p`"
+ if sys.platform == 'win32': # pragma: no cover
+ # os.statvfs doesn't work on Windows
+ import win32file
+
+ secsPerClus, bytesPerSec, nFreeClus, totClus = win32file.GetDiskFreeSpace(
+ p)
+ return secsPerClus * bytesPerSec * nFreeClus
+ else:
+ r = os.statvfs(p)
+ return r.f_frsize * r.f_bavail
+
+
+def purge(folder, gigs):
+ """If gigs is non 0, it deletes files in `folder` until `gigs` GB are free,
+ starting from older files. If gigs is 0, a full purge will be performed.
+ No recursive deletion of files in subfolder is performed."""
+
+ full_purge = bool(gigs == 0)
+ gigs *= 1024 * 1024 * 1024
+
+ if not full_purge and freespace(folder) >= gigs:
+ log.info("No need to cleanup")
+ return
+
+ files = []
+ for f in os.listdir(folder):
+ p = os.path.join(folder, f)
+ # it deletes files in folder without going into subfolders,
+ # assuming the cache has a flat structure
+ if not os.path.isfile(p):
+ continue
+ mtime = os.path.getmtime(p)
+ files.append((mtime, p))
+
+ # iterate files sorted by mtime
+ for _, f in sorted(files):
+ log.info("removing %s to free up space" % f)
+ try:
+ os.remove(f)
+ except OSError:
+ log.info("Impossible to remove %s" % f, exc_info=True)
+ if not full_purge and freespace(folder) >= gigs:
+ break
+
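purge backs the `purge` command: with a non-zero -s it evicts the oldest files first until that many GB are free; with the default of 0 it empties the folder (path illustrative):

    python tooltool.py purge -c /home/worker/tooltool-cache -s 20   # ensure 20 GB free
    python tooltool.py purge -c /home/worker/tooltool-cache         # full purge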
+
+def _log_api_error(e):
+ if hasattr(e, 'hdrs') and e.hdrs['content-type'] == 'application/json':
+ json_resp = json.load(e.fp)
+ log.error("%s: %s" % (json_resp['error']['name'],
+ json_resp['error']['description']))
+ else:
+ log.exception("Error making RelengAPI request:")
+
+
+def _authorize(req, auth_file):
+ if auth_file:
+ log.debug("using bearer token in %s" % auth_file)
+ req.add_unredirected_header('Authorization',
+ 'Bearer %s' % (open(auth_file, "rb").read().strip()))
+
+
+def _send_batch(base_url, auth_file, batch, region):
+ url = urlparse.urljoin(base_url, 'upload')
+ if region is not None:
+ url += "?region=" + region
+ req = urllib2.Request(url, json.dumps(batch), {'Content-Type': 'application/json'})
+ _authorize(req, auth_file)
+ try:
+ resp = urllib2.urlopen(req)
+ except (urllib2.URLError, urllib2.HTTPError) as e:
+ _log_api_error(e)
+ return None
+ return json.load(resp)['result']
+
+
+def _s3_upload(filename, file):
+ # urllib2 does not support streaming, so we fall back to good old httplib
+ url = urlparse.urlparse(file['put_url'])
+ cls = httplib.HTTPSConnection if url.scheme == 'https' else httplib.HTTPConnection
+ host, port = url.netloc.split(':') if ':' in url.netloc else (url.netloc, 443)
+ port = int(port)
+ conn = cls(host, port)
+ try:
+ req_path = "%s?%s" % (url.path, url.query) if url.query else url.path
+ conn.request('PUT', req_path, open(filename, "rb"),
+ {'Content-type': 'application/octet-stream'})
+ resp = conn.getresponse()
+ resp_body = resp.read()
+ conn.close()
+ if resp.status != 200:
+ raise RuntimeError("Non-200 return from AWS: %s %s\n%s" %
+ (resp.status, resp.reason, resp_body))
+ except Exception:
+ file['upload_exception'] = sys.exc_info()
+ file['upload_ok'] = False
+ else:
+ file['upload_ok'] = True
+
+
+def _notify_upload_complete(base_url, auth_file, file):
+ req = urllib2.Request(
+ urlparse.urljoin(
+ base_url,
+ 'upload/complete/%(algorithm)s/%(digest)s' % file))
+ _authorize(req, auth_file)
+ try:
+ urllib2.urlopen(req)
+ except urllib2.HTTPError as e:
+ if e.code != 409:
+ _log_api_error(e)
+ return
+ # 409 indicates that the upload URL hasn't expired yet and we
+ # should retry after a delay
+ to_wait = int(e.headers.get('X-Retry-After', 60))
+ log.warning("Waiting %d seconds for upload URLs to expire" % to_wait)
+ time.sleep(to_wait)
+ _notify_upload_complete(base_url, auth_file, file)
+ except Exception:
+ log.exception("While notifying server of upload completion:")
+
+
+def upload(manifest, message, base_urls, auth_file, region):
+ try:
+ manifest = open_manifest(manifest)
+ except InvalidManifest:
+ log.exception("failed to load manifest file at '%s'")
+ return False
+
+ # verify the manifest, since we'll need the files present to upload
+ if not manifest.validate():
+ log.error('manifest is invalid')
+ return False
+
+ if any(fr.visibility is None for fr in manifest.file_records):
+ log.error('All files in a manifest for upload must have a visibility set')
+ return False
+
+ # convert the manifest to an upload batch
+ batch = {
+ 'message': message,
+ 'files': {},
+ }
+ for fr in manifest.file_records:
+ batch['files'][fr.filename] = {
+ 'size': fr.size,
+ 'digest': fr.digest,
+ 'algorithm': fr.algorithm,
+ 'visibility': fr.visibility,
+ }
+
+ # make the upload request
+ resp = _send_batch(base_urls[0], auth_file, batch, region)
+ if not resp:
+ return None
+ files = resp['files']
+
+ # Upload the files, each in a thread. This allows us to start all of the
+ # uploads before any of the URLs expire.
+ threads = {}
+ for filename, file in files.iteritems():
+ if 'put_url' in file:
+ log.info("%s: starting upload" % (filename,))
+ thd = threading.Thread(target=_s3_upload,
+ args=(filename, file))
+ thd.daemon = 1
+ thd.start()
+ threads[filename] = thd
+ else:
+ log.info("%s: already exists on server" % (filename,))
+
+ # re-join all of those threads as they exit
+ success = True
+ while threads:
+ for filename, thread in threads.items():
+ if not thread.is_alive():
+ # _s3_upload has annotated file with result information
+ file = files[filename]
+ thread.join()
+ if file['upload_ok']:
+ log.info("%s: uploaded" % filename)
+ else:
+ log.error("%s: failed" % filename,
+ exc_info=file['upload_exception'])
+ success = False
+ del threads[filename]
+
+ # notify the server that the uploads are completed. If the notification
+ # fails, we don't consider that an error (the server will notice
+ # eventually)
+ for filename, file in files.iteritems():
+ if 'put_url' in file and file['upload_ok']:
+ log.info("notifying server of upload completion for %s" % (filename,))
+ _notify_upload_complete(base_urls[0], auth_file, file)
+
+ return success
+
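upload backs the `upload` command and requires a commit-style message plus a RelengAPI token; the bug number and token path below are illustrative:

    python tooltool.py upload -m manifest.tt \
        --message 'Bug 123456 - add prebuilt toolchain archive' \
        --authentication-file ~/.tooltool-token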
+
+def process_command(options, args):
+ """ I know how to take a list of program arguments and
+ start doing the right thing with them"""
+ cmd = args[0]
+ cmd_args = args[1:]
+ log.debug("processing '%s' command with args '%s'" %
+ (cmd, '", "'.join(cmd_args)))
+ log.debug("using options: %s" % options)
+
+ if cmd == 'list':
+ return list_manifest(options['manifest'])
+ if cmd == 'validate':
+ return validate_manifest(options['manifest'])
+ elif cmd == 'add':
+ return add_files(options['manifest'], options['algorithm'], cmd_args,
+ options['visibility'], options['unpack'])
+ elif cmd == 'purge':
+ if options['cache_folder']:
+ purge(folder=options['cache_folder'], gigs=options['size'])
+ else:
+ log.critical('please specify the cache folder to be purged')
+ return False
+ elif cmd == 'fetch':
+ return fetch_files(
+ options['manifest'],
+ options['base_url'],
+ cmd_args,
+ cache_folder=options['cache_folder'],
+ auth_file=options.get("auth_file"),
+ region=options.get('region'))
+ elif cmd == 'upload':
+ if not options.get('message'):
+ log.critical('upload command requires a message')
+ return False
+ return upload(
+ options.get('manifest'),
+ options.get('message'),
+ options.get('base_url'),
+ options.get('auth_file'),
+ options.get('region'))
+ else:
+ log.critical('command "%s" is not implemented' % cmd)
+ return False
+
+
+def main(argv, _skip_logging=False):
+ # Set up option parsing
+ parser = optparse.OptionParser()
+ parser.add_option('-q', '--quiet', default=logging.INFO,
+ dest='loglevel', action='store_const', const=logging.ERROR)
+ parser.add_option('-v', '--verbose',
+ dest='loglevel', action='store_const', const=logging.DEBUG)
+ parser.add_option('-m', '--manifest', default=DEFAULT_MANIFEST_NAME,
+ dest='manifest', action='store',
+ help='specify the manifest file to be operated on')
+ parser.add_option('-d', '--algorithm', default='sha512',
+ dest='algorithm', action='store',
+ help='hashing algorithm to use (only sha512 is allowed)')
+ parser.add_option('--visibility', default=None,
+ dest='visibility', choices=['internal', 'public'],
+ help='Visibility level of this file; "internal" is for '
+ 'files that cannot be distributed out of the company '
+ 'but not for secrets; "public" files are available to '
+ 'anyone without restriction')
+ parser.add_option('--unpack', default=False,
+ dest='unpack', action='store_true',
+ help='Request unpacking this file after fetch.'
+ ' This is helpful with tarballs.')
+ parser.add_option('-o', '--overwrite', default=False,
+ dest='overwrite', action='store_true',
+ help='UNUSED; present for backward compatibility')
+ parser.add_option('--url', dest='base_url', action='append',
+ help='RelengAPI URL ending with /tooltool/; default '
+ 'is appropriate for Mozilla')
+ parser.add_option('-c', '--cache-folder', dest='cache_folder',
+ help='Local cache folder')
+ parser.add_option('-s', '--size',
+ help='free space required (in GB)', dest='size',
+ type='float', default=0.)
+ parser.add_option('-r', '--region', help='Preferred AWS region for upload or fetch; '
+ 'example: --region=us-west-2')
+ parser.add_option('--message',
+ help='The "commit message" for an upload; format with a bug number '
+ 'and brief comment',
+ dest='message')
+ parser.add_option('--authentication-file',
+ help='Use the RelengAPI token found in the given file to '
+ 'authenticate to the RelengAPI server.',
+ dest='auth_file')
+
+ (options_obj, args) = parser.parse_args(argv[1:])
+
+ # default the options list if not provided
+ if not options_obj.base_url:
+ options_obj.base_url = ['https://api.pub.build.mozilla.org/tooltool/']
+
+ # ensure all URLs have a trailing slash
+ def add_slash(url):
+ return url if url.endswith('/') else (url + '/')
+ options_obj.base_url = [add_slash(u) for u in options_obj.base_url]
+
+ # expand ~ in --authentication-file
+ if options_obj.auth_file:
+ options_obj.auth_file = os.path.expanduser(options_obj.auth_file)
+
+ # Dictionaries are easier to work with
+ options = vars(options_obj)
+
+ log.setLevel(options['loglevel'])
+
+ # Set up logging, for now just to the console
+ if not _skip_logging: # pragma: no cover
+ ch = logging.StreamHandler()
+ cf = logging.Formatter("%(levelname)s - %(message)s")
+ ch.setFormatter(cf)
+ log.addHandler(ch)
+
+ if options['algorithm'] != 'sha512':
+ parser.error('only --algorithm sha512 is supported')
+
+ if len(args) < 1:
+ parser.error('You must specify a command')
+
+ return 0 if process_command(options, args) else 1
+
+if __name__ == "__main__": # pragma: no cover
+ sys.exit(main(sys.argv))
diff --git a/testing/docker/recipes/ubuntu1204-test-system-setup.sh b/testing/docker/recipes/ubuntu1204-test-system-setup.sh
new file mode 100644
index 000000000..4edcf00a1
--- /dev/null
+++ b/testing/docker/recipes/ubuntu1204-test-system-setup.sh
@@ -0,0 +1,279 @@
+#!/usr/bin/env bash
+
+set -ve
+
+test `whoami` == 'root'
+
+mkdir -p /setup
+cd /setup
+
+apt_packages=()
+
+apt_packages+=('alsa-base')
+apt_packages+=('alsa-utils')
+apt_packages+=('autoconf2.13')
+apt_packages+=('bluez-alsa')
+apt_packages+=('bluez-alsa:i386')
+apt_packages+=('bluez-cups')
+apt_packages+=('bluez-gstreamer')
+apt_packages+=('build-essential')
+apt_packages+=('ca-certificates')
+apt_packages+=('ccache')
+apt_packages+=('curl')
+apt_packages+=('fonts-kacst')
+apt_packages+=('fonts-kacst-one')
+apt_packages+=('fonts-liberation')
+apt_packages+=('fonts-stix')
+apt_packages+=('fonts-unfonts-core')
+apt_packages+=('fonts-unfonts-extra')
+apt_packages+=('fonts-vlgothic')
+apt_packages+=('g++-multilib')
+apt_packages+=('gcc-multilib')
+apt_packages+=('gir1.2-gnomebluetooth-1.0')
+apt_packages+=('git')
+apt_packages+=('gstreamer0.10-alsa')
+apt_packages+=('gstreamer0.10-ffmpeg')
+apt_packages+=('gstreamer0.10-plugins-bad')
+apt_packages+=('gstreamer0.10-plugins-base')
+apt_packages+=('gstreamer0.10-plugins-good')
+apt_packages+=('gstreamer0.10-plugins-ugly')
+apt_packages+=('gstreamer0.10-tools')
+apt_packages+=('language-pack-en-base')
+apt_packages+=('libasound2-dev')
+apt_packages+=('libasound2-plugins:i386')
+apt_packages+=('libcanberra-pulse')
+apt_packages+=('libcurl4-openssl-dev')
+apt_packages+=('libdbus-1-dev')
+apt_packages+=('libdbus-glib-1-dev')
+apt_packages+=('libdrm-intel1:i386')
+apt_packages+=('libdrm-nouveau1a:i386')
+apt_packages+=('libdrm-radeon1:i386')
+apt_packages+=('libdrm2:i386')
+apt_packages+=('libexpat1:i386')
+apt_packages+=('libgconf2-dev')
+apt_packages+=('libgnome-bluetooth8')
+apt_packages+=('libgstreamer-plugins-base0.10-dev')
+apt_packages+=('libgstreamer0.10-dev')
+apt_packages+=('libgtk2.0-dev')
+apt_packages+=('libiw-dev')
+apt_packages+=('libllvm2.9')
+apt_packages+=('libllvm3.0:i386')
+apt_packages+=('libncurses5:i386')
+apt_packages+=('libnotify-dev')
+apt_packages+=('libpulse-dev')
+apt_packages+=('libpulse-mainloop-glib0:i386')
+apt_packages+=('libpulsedsp:i386')
+apt_packages+=('libsdl1.2debian:i386')
+apt_packages+=('libsox-fmt-alsa')
+apt_packages+=('libx11-xcb1:i386')
+apt_packages+=('libxdamage1:i386')
+apt_packages+=('libxfixes3:i386')
+apt_packages+=('libxt-dev')
+apt_packages+=('libxxf86vm1')
+apt_packages+=('libxxf86vm1:i386')
+apt_packages+=('llvm')
+apt_packages+=('llvm-2.9')
+apt_packages+=('llvm-2.9-dev')
+apt_packages+=('llvm-2.9-runtime')
+apt_packages+=('llvm-dev')
+apt_packages+=('llvm-runtime')
+apt_packages+=('nano')
+apt_packages+=('pulseaudio')
+apt_packages+=('pulseaudio-module-X11')
+apt_packages+=('pulseaudio-module-bluetooth')
+apt_packages+=('pulseaudio-module-gconf')
+apt_packages+=('rlwrap')
+apt_packages+=('screen')
+apt_packages+=('software-properties-common')
+apt_packages+=('sudo')
+apt_packages+=('tar')
+apt_packages+=('ttf-arphic-uming')
+apt_packages+=('ttf-dejavu')
+apt_packages+=('ttf-indic-fonts-core')
+apt_packages+=('ttf-kannada-fonts')
+apt_packages+=('ttf-oriya-fonts')
+apt_packages+=('ttf-paktype')
+apt_packages+=('ttf-punjabi-fonts')
+apt_packages+=('ttf-sazanami-mincho')
+apt_packages+=('ubuntu-desktop')
+apt_packages+=('unzip')
+apt_packages+=('uuid')
+apt_packages+=('vim')
+apt_packages+=('wget')
+apt_packages+=('xvfb')
+apt_packages+=('yasm')
+apt_packages+=('zip')
+
+# get xvinfo for test-linux.sh to monitor Xvfb startup
+apt_packages+=('x11-utils')
+
+# Bug 1232407 - this allows the user to start vnc
+apt_packages+=('x11vnc')
+
+# Bug 1176031: need `xset` to disable screensavers
+apt_packages+=('x11-xserver-utils')
+
+# use Ubuntu's Python-2.7 (2.7.3 on Precise)
+apt_packages+=('python-dev')
+apt_packages+=('python-pip')
+
+apt-get update
+# This allows ubuntu-desktop to be installed without human interaction
+export DEBIAN_FRONTEND=noninteractive
+apt-get install -y --force-yes ${apt_packages[@]}
+
+dpkg-reconfigure locales
+
+tooltool_fetch() {
+ cat >manifest.tt
+ python /setup/tooltool.py fetch
+ rm manifest.tt
+}
+
+. /tmp/install-mercurial.sh
+
+# install peep
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 26912,
+ "digest": "9d730ed7852d4d217aaddda959cd5f871ef1b26dd6c513a3780bbb04a5a93a49d6b78e95c2274451a1311c10cc0a72755b269dc9af62640474e6e73a1abec370",
+ "algorithm": "sha512",
+ "filename": "peep-2.4.1.tar.gz",
+ "unpack": false
+}
+]
+EOF
+pip install peep-2.4.1.tar.gz
+
+# remaining Python utilities are installed with `peep` from upstream
+# repositories; peep verifies file integrity for us
+cat >requirements.txt <<'EOF'
+# wheel
+# sha256: 90pZQ6kAXB6Je8-H9-ivfgDAb6l3e5rWkfafn6VKh9g
+# tarball:
+# sha256: qryO8YzdvYoqnH-SvEPi_qVLEUczDWXbkg7zzpgS49w
+virtualenv==13.1.2
+EOF
+peep install -r requirements.txt
+
+# Install node
+wget https://nodejs.org/dist/v5.0.0/node-v5.0.0-linux-x64.tar.gz
+echo 'ef73b59048a0ed11d01633f0061627b7a9879257deb9add2255e4d0808f8b671 node-v5.0.0-linux-x64.tar.gz' | sha256sum -c
+tar -C /usr/local -xz --strip-components 1 < node-v5.0.0-linux-x64.tar.gz
+node -v # verify
+
+# Install custom-built Debian packages. These come from a set of repositories
+# packaged in tarballs on tooltool to make them replicable. Because they have
+# inter-dependencies, we install all repositories first, then perform the
+# installation.
+cp /etc/apt/sources.list sources.list.orig
+
+# Install a slightly newer version of libxcb
+# See bugs 975216 and 1334641 for the original build of these packages
+# NOTE: if you're re-creating this, the tarball contains an `update.sh` which will rebuild the repository.
+tooltool_fetch <<'EOF'
+[
+ {
+ "size": 9711517,
+ "visibility": "public",
+ "digest": "ecbcebfb409ad9f7f2a9b6b058e20d49e45b3fd5d94dac59e94ff9a54844611f715230468af506a10a5cd62df6df74fdf0e126d43f6bec743eb803ded0740da7",
+ "algorithm": "sha512",
+ "filename": "xcb-repo-1.8.1-2ubuntu2.1mozilla2.tgz"
+ }
+]
+EOF
+tar -zxf xcb-repo-*.tgz
+echo "deb file://$PWD/xcb precise all" >> /etc/apt/sources.list
+
+# Install a patched version of mesa, per bug 1227637. Origin of the packages themselves is unknown, as
+# these binaries were copied from the apt repositories used by puppet. Ask rail for more information.
+# NOTE: if you're re-creating this, the tarball contains an `update.sh` which will rebuild the repository.
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 590643702,
+ "visibility": "public",
+ "digest": "f03b11987c218e57073d1b7eec6cc0a753d48f600df8dde0a35fa7c4d4d30b3891c9cbcaee38ade23f038e72951cb15f0dca3f7c76cbf5bad5526baf13e91929",
+ "algorithm": "sha512",
+ "filename": "mesa-repo-9.2.1-1ubuntu3~precise1mozilla2.tgz"
+}
+]
+EOF
+tar -zxf mesa-repo-*.tgz
+echo "deb file://$PWD/mesa precise all" >> /etc/apt/sources.list
+
+# Install Valgrind (trunk, late Jan 2016) and do some crude sanity
+# checks. It has to go in /usr/local, otherwise it won't work. Copy
+# the launcher binary to /usr/bin, though, so that direct invocations
+# of /usr/bin/valgrind also work. Also install libc6-dbg since
+# Valgrind won't work at all without the debug symbols for libc.so and
+# ld.so being available.
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 41331092,
+ "visibility": "public",
+ "digest": "a89393c39171b8304fc262094a650df9a756543ffe9fbec935911e7b86842c4828b9b831698f97612abb0eca95cf7f7b3ff33ea7a9b0313b30c9be413a5efffc",
+ "algorithm": "sha512",
+ "filename": "valgrind-15775-3206-ubuntu1204.tgz"
+}
+]
+EOF
+cp valgrind-15775-3206-ubuntu1204.tgz /tmp
+(cd / && tar xzf /tmp/valgrind-15775-3206-ubuntu1204.tgz)
+rm /tmp/valgrind-15775-3206-ubuntu1204.tgz
+cp /usr/local/bin/valgrind /usr/bin/valgrind
+apt-get install -y libc6-dbg
+valgrind --version
+valgrind date
+
+# Fetch the minidump_stackwalk binary specified by the in-tree tooltool manifest.
+python /setup/tooltool.py fetch -m /tmp/minidump_stackwalk.manifest
+rm /tmp/minidump_stackwalk.manifest
+mv linux64-minidump_stackwalk /usr/local/bin/
+chmod +x /usr/local/bin/linux64-minidump_stackwalk
+
+apt-get update
+
+apt-get -q -y --force-yes install \
+ libxcb1 \
+ libxcb-render0 \
+ libxcb-shm0 \
+ libxcb-glx0 \
+ libxcb-shape0 \
+ libxcb-glx0:i386
+libxcb1_version=$(dpkg-query -s libxcb1 | grep ^Version | awk '{ print $2 }')
+[ "$libxcb1_version" = "1.8.1-2ubuntu2.1mozilla2" ] || exit 1
+
+apt-get -q -y --force-yes install \
+ libgl1-mesa-dev-lts-saucy:i386 \
+ libgl1-mesa-dri-lts-saucy \
+ libgl1-mesa-dri-lts-saucy:i386 \
+ libgl1-mesa-glx-lts-saucy \
+ libgl1-mesa-glx-lts-saucy:i386 \
+ libglapi-mesa-lts-saucy \
+ libglapi-mesa-lts-saucy:i386 \
+ libxatracker1-lts-saucy \
+ mesa-common-dev-lts-saucy:i386
+mesa_version=$(dpkg-query -s libgl1-mesa-dri-lts-saucy | grep ^Version | awk '{ print $2 }')
+[ "$mesa_version" = "9.2.1-1ubuntu3~precise1mozilla2" ] || exit 1
+
+# revert the list of repos
+cp sources.list.orig /etc/apt/sources.list
+apt-get update
+
+# node 5 requires a C++11 compiler.
+add-apt-repository ppa:ubuntu-toolchain-r/test
+apt-get update
+apt-get -y install gcc-4.8 g++-4.8
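+# Register both toolchains with update-alternatives; the higher priority
+# (20 vs. 10) makes gcc-4.8/g++-4.8 the default in automatic mode.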
+update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 20 --slave /usr/bin/g++ g++ /usr/bin/g++-4.8
+update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.6 10 --slave /usr/bin/g++ g++ /usr/bin/g++-4.6
+
+# clean up
+
+cd /
+rm -rf /setup ~/.ccache ~/.cache ~/.npm
+apt-get clean
+apt-get autoclean
+rm -f $0
diff --git a/testing/docker/recipes/ubuntu1604-test-system-setup.sh b/testing/docker/recipes/ubuntu1604-test-system-setup.sh
new file mode 100644
index 000000000..b58ee7cb1
--- /dev/null
+++ b/testing/docker/recipes/ubuntu1604-test-system-setup.sh
@@ -0,0 +1,180 @@
+#!/usr/bin/env bash
+
+set -ve
+
+test `whoami` == 'root'
+
+mkdir -p /setup
+cd /setup
+
+apt_packages=()
+
+apt_packages+=('alsa-base')
+apt_packages+=('alsa-utils')
+apt_packages+=('autoconf2.13')
+apt_packages+=('bluez-cups')
+apt_packages+=('build-essential')
+apt_packages+=('ca-certificates')
+apt_packages+=('ccache')
+apt_packages+=('curl')
+apt_packages+=('fonts-kacst')
+apt_packages+=('fonts-kacst-one')
+apt_packages+=('fonts-liberation')
+apt_packages+=('fonts-stix')
+apt_packages+=('fonts-unfonts-core')
+apt_packages+=('fonts-unfonts-extra')
+apt_packages+=('fonts-vlgothic')
+apt_packages+=('g++-multilib')
+apt_packages+=('gcc-multilib')
+apt_packages+=('gir1.2-gnomebluetooth-1.0')
+apt_packages+=('git')
+apt_packages+=('gstreamer0.10-alsa')
+apt_packages+=('gstreamer0.10-plugins-base')
+apt_packages+=('gstreamer0.10-plugins-good')
+apt_packages+=('gstreamer0.10-tools')
+apt_packages+=('language-pack-en-base')
+apt_packages+=('libasound2-dev')
+apt_packages+=('libcanberra-pulse')
+apt_packages+=('libcurl4-openssl-dev')
+apt_packages+=('libdbus-1-dev')
+apt_packages+=('libdbus-glib-1-dev')
+apt_packages+=('libgconf2-dev')
+apt_packages+=('libgstreamer-plugins-base0.10-dev')
+apt_packages+=('libgstreamer0.10-dev')
+apt_packages+=('libgtk2.0-dev')
+apt_packages+=('libiw-dev')
+apt_packages+=('libnotify-dev')
+apt_packages+=('libpulse-dev')
+apt_packages+=('libsox-fmt-alsa')
+apt_packages+=('libxt-dev')
+apt_packages+=('libxxf86vm1')
+apt_packages+=('llvm')
+apt_packages+=('llvm-dev')
+apt_packages+=('llvm-runtime')
+apt_packages+=('nano')
+apt_packages+=('pulseaudio')
+apt_packages+=('pulseaudio-module-bluetooth')
+apt_packages+=('pulseaudio-module-gconf')
+apt_packages+=('rlwrap')
+apt_packages+=('screen')
+apt_packages+=('software-properties-common')
+apt_packages+=('sudo')
+apt_packages+=('tar')
+apt_packages+=('ttf-dejavu')
+apt_packages+=('ubuntu-desktop')
+apt_packages+=('unzip')
+apt_packages+=('uuid')
+apt_packages+=('vim')
+apt_packages+=('wget')
+apt_packages+=('xvfb')
+apt_packages+=('yasm')
+apt_packages+=('zip')
+
+# get xvinfo for test-linux.sh to monitor Xvfb startup
+apt_packages+=('x11-utils')
+
+# Bug 1232407 - this allows the user to start vnc
+apt_packages+=('x11vnc')
+
+# Bug 1176031: need `xset` to disable screensavers
+apt_packages+=('x11-xserver-utils')
+
+# use Ubuntu's Python 2.7 (the Xenial default)
+apt_packages+=('python-dev')
+apt_packages+=('python-pip')
+
+apt-get update
+# This allows ubuntu-desktop to be installed without human interaction
+export DEBIAN_FRONTEND=noninteractive
+apt-get install -y -f "${apt_packages[@]}"
+
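+# regenerate locale data for the language packs installed above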
+dpkg-reconfigure locales
+
+. /setup/common.sh
+. /setup/install-mercurial.sh
+
+pip install --upgrade pip
+
+pip install virtualenv
+
+# Install node
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 5676610,
+ "digest": "ce27b788dfd141a5ba7674332825fc136fe2c4f49a319dd19b3a87c8fffa7a97d86cbb8535661c9a68c9122719aa969fc6a8c886458a0df9fc822eec99ed130b",
+ "algorithm": "sha512",
+ "filename": "node-v0.10.36-linux-x64.tar.gz"
+}
+]
+EOF
+tar -C /usr/local -xz --strip-components 1 < node-*.tar.gz
+node -v # verify
+
+# Save the stock apt repository list so the extra repositories added below
+# (multiverse, for ubuntu-restricted-extras) can be reverted once the
+# installation is done.
+cp /etc/apt/sources.list sources.list.orig
+
+# Install Valgrind (trunk, late Jan 2016) and do some crude sanity
+# checks. It has to go in /usr/local, otherwise it won't work. Copy
+# the launcher binary to /usr/bin, though, so that direct invocations
+# of /usr/bin/valgrind also work. Also install libc6-dbg since
+# Valgrind won't work at all without the debug symbols for libc.so and
+# ld.so being available.
+tooltool_fetch <<'EOF'
+[
+{
+ "size": 41331092,
+ "visibility": "public",
+ "digest": "a89393c39171b8304fc262094a650df9a756543ffe9fbec935911e7b86842c4828b9b831698f97612abb0eca95cf7f7b3ff33ea7a9b0313b30c9be413a5efffc",
+ "algorithm": "sha512",
+ "filename": "valgrind-15775-3206-ubuntu1204.tgz"
+}
+]
+EOF
+cp valgrind-15775-3206-ubuntu1204.tgz /tmp
+(cd / && tar xzf /tmp/valgrind-15775-3206-ubuntu1204.tgz)
+rm /tmp/valgrind-15775-3206-ubuntu1204.tgz
+cp /usr/local/bin/valgrind /usr/bin/valgrind
+apt-get install -y libc6-dbg
+valgrind --version
+valgrind date
+
+# Fetch the minidump_stackwalk binary specified by the in-tree tooltool manifest.
+python /setup/tooltool.py fetch -m /tmp/minidump_stackwalk.manifest
+rm /tmp/minidump_stackwalk.manifest
+mv linux64-minidump_stackwalk /usr/local/bin/
+chmod +x /usr/local/bin/linux64-minidump_stackwalk
+
+# adding multiverse to get 'ubuntu-restricted-extras' below
+apt-add-repository multiverse
+apt-get update
+
+# for mp4 codec (used in MSE tests)
+apt-get -q -y -f install ubuntu-restricted-extras
+
+apt-get -q -y -f install \
+ libxcb1 \
+ libxcb-render0 \
+ libxcb-shm0 \
+ libxcb-glx0 \
+ libxcb-shape0
+
+apt-get -q -y -f install \
+ libgl1-mesa-dri \
+ libgl1-mesa-glx \
+ mesa-common-dev
+
+# revert the list of repos
+cp sources.list.orig /etc/apt/sources.list
+apt-get update
+
+# clean up
+cd /
+rm -rf /setup ~/.ccache ~/.cache ~/.npm
+apt-get clean
+apt-get autoclean
+rm -f $0
diff --git a/testing/docker/recipes/xvfb.sh b/testing/docker/recipes/xvfb.sh
new file mode 100644
index 000000000..6e0e79f7d
--- /dev/null
+++ b/testing/docker/recipes/xvfb.sh
@@ -0,0 +1,75 @@
+#! /bin/bash -x
+
+set -x
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[xvfb.sh:error]" "${@}"
+ exit 1
+}
+
+cleanup_xvfb() {
+ # When the task is started with START_VNC or TASKCLUSTER_INTERACTIVE set,
+ # leave Xvfb running so an interactive user does not lose the connection.
+ local xvfb_pid=`pidof Xvfb`
+ local vnc=${START_VNC:-false}
+ local interactive=${TASKCLUSTER_INTERACTIVE:-false}
+ if [ -n "$xvfb_pid" ] && [[ $vnc == false ]] && [[ $interactive == false ]] ; then
+ kill $xvfb_pid || true
+ screen -XS xvfb quit || true
+ fi
+}
+
+# Attempt to start Xvfb in a screen session with the given resolution and
+# display number, then poll the display with xvinfo, sleeping briefly
+# between attempts, until it responds or the retry limit is reached.
+try_xvfb() {
+ screen -dmS xvfb Xvfb :$2 -nolisten tcp -screen 0 $1 \
+ > ~/artifacts/xvfb/xvfb.log 2>&1
+ export DISPLAY=:$2
+
+ # Only error code 255 matters, because it signifies that no
+ # display could be opened. As long as we can open the display
+ # tests should work. We'll retry a few times with a sleep before
+ # failing.
+ local retry_count=0
+ local max_retries=5
+ xvfb_test=0
+ until [ $retry_count -gt $max_retries ]; do
+ xvinfo || xvfb_test=$?
+ if [ $xvfb_test != 255 ]; then
+ retry_count=$(($max_retries + 1)) # past the limit: force loop exit
+ else
+ retry_count=$(($retry_count + 1))
+ echo "Failed to start Xvfb, retry: $retry_count"
+ sleep 2
+ fi
+ done
+ if [ $xvfb_test == 255 ]; then
+ return 1
+ else
+ return 0
+ fi
+}
+
+start_xvfb() {
+ set +e
+ mkdir -p ~/artifacts/xvfb
+ local retry_count=0
+ local max_retries=2
+ local success=1
+ until [ $retry_count -gt $max_retries ]; do
+ try_xvfb $1 $2
+ success=$?
+ if [ $success -eq 0 ]; then
+ retry_count=$(($max_retries + 1)) # success: force loop exit
+ else
+ retry_count=$(($retry_count + 1))
+ sleep 10
+ fi
+ done
+ set -e
+ if [ $success -eq 1 ]; then
+ fail "Could not start xvfb after ${max_retries} attempts"
+ fi
+}
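+
+# Typical usage from a task wrapper (a sketch; the sourcing path and the
+# screen geometry shown are assumptions, not defined by this file):
+#
+# . /home/worker/scripts/xvfb.sh
+# start_xvfb '1600x1200x24' 0 # geometry WxHxDepth on display :0
+# ... run tests against $DISPLAY ...
+# cleanup_xvfb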