author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /taskcluster
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'taskcluster')
-rw-r--r--  taskcluster/ci/artifact-build/kind.yml | 39
-rw-r--r--  taskcluster/ci/build-signing/kind.yml | 8
-rw-r--r--  taskcluster/ci/build/android.yml | 137
-rw-r--r--  taskcluster/ci/build/kind.yml | 16
-rw-r--r--  taskcluster/ci/build/linux.yml | 254
-rw-r--r--  taskcluster/ci/build/macosx.yml | 71
-rw-r--r--  taskcluster/ci/build/windows.yml | 122
-rw-r--r--  taskcluster/ci/desktop-test/kind.yml | 12
-rw-r--r--  taskcluster/ci/desktop-test/test-platforms.yml | 73
-rw-r--r--  taskcluster/ci/desktop-test/test-sets.yml | 118
-rw-r--r--  taskcluster/ci/desktop-test/tests.yml | 721
-rw-r--r--  taskcluster/ci/docker-image/image.yml | 68
-rw-r--r--  taskcluster/ci/docker-image/kind.yml | 19
-rw-r--r--  taskcluster/ci/hazard/kind.yml | 58
-rw-r--r--  taskcluster/ci/l10n/kind.yml | 89
-rw-r--r--  taskcluster/ci/marionette-harness/kind.yml | 51
-rw-r--r--  taskcluster/ci/source-check/doc.yml | 32
-rw-r--r--  taskcluster/ci/source-check/kind.yml | 15
-rw-r--r--  taskcluster/ci/source-check/mozlint.yml | 97
-rw-r--r--  taskcluster/ci/source-check/python-tests.yml | 49
-rw-r--r--  taskcluster/ci/spidermonkey/kind.yml | 199
-rw-r--r--  taskcluster/ci/static-analysis/kind.yml | 63
-rw-r--r--  taskcluster/ci/toolchain/kind.yml | 14
-rw-r--r--  taskcluster/ci/toolchain/linux.yml | 66
-rw-r--r--  taskcluster/ci/toolchain/windows.yml | 23
-rw-r--r--  taskcluster/ci/upload-symbols/job-template.yml | 19
-rw-r--r--  taskcluster/ci/upload-symbols/kind.yml | 19
-rw-r--r--  taskcluster/ci/valgrind/kind.yml | 40
-rw-r--r--  taskcluster/docs/attributes.rst | 124
-rw-r--r--  taskcluster/docs/caches.rst | 43
-rw-r--r--  taskcluster/docs/docker-images.rst | 42
-rw-r--r--  taskcluster/docs/how-tos.rst | 220
-rw-r--r--  taskcluster/docs/index.rst | 30
-rw-r--r--  taskcluster/docs/kinds.rst | 144
-rw-r--r--  taskcluster/docs/loading.rst | 31
-rw-r--r--  taskcluster/docs/parameters.rst | 97
-rw-r--r--  taskcluster/docs/reference.rst | 12
-rw-r--r--  taskcluster/docs/taskgraph.rst | 276
-rw-r--r--  taskcluster/docs/transforms.rst | 198
-rw-r--r--  taskcluster/docs/yaml-templates.rst | 49
-rw-r--r--  taskcluster/mach_commands.py | 290
-rw-r--r--  taskcluster/moz.build | 7
-rwxr-xr-x  taskcluster/scripts/builder/build-haz-linux.sh | 89
-rwxr-xr-x  taskcluster/scripts/builder/build-l10n.sh | 98
-rwxr-xr-x  taskcluster/scripts/builder/build-linux.sh | 122
-rwxr-xr-x  taskcluster/scripts/builder/build-sm-mozjs-crate.sh | 18
-rwxr-xr-x  taskcluster/scripts/builder/build-sm-package.sh | 28
-rwxr-xr-x  taskcluster/scripts/builder/build-sm.sh | 20
-rwxr-xr-x  taskcluster/scripts/builder/desktop-setup.sh | 24
-rwxr-xr-x  taskcluster/scripts/builder/get-objdir.py | 20
-rwxr-xr-x  taskcluster/scripts/builder/hazard-analysis.sh | 149
-rwxr-xr-x  taskcluster/scripts/builder/install-packages.sh | 13
-rw-r--r--  taskcluster/scripts/builder/setup-ccache.sh | 9
-rwxr-xr-x  taskcluster/scripts/builder/sm-tooltool-config.sh | 50
-rwxr-xr-x  taskcluster/scripts/copy.sh | 9
-rwxr-xr-x  taskcluster/scripts/misc/build-binutils-linux.sh | 16
-rwxr-xr-x  taskcluster/scripts/misc/build-cctools.sh | 82
-rwxr-xr-x  taskcluster/scripts/misc/build-clang-linux.sh | 30
-rwxr-xr-x  taskcluster/scripts/misc/build-clang-windows.sh | 61
-rwxr-xr-x  taskcluster/scripts/misc/build-gcc-linux.sh | 16
-rwxr-xr-x  taskcluster/scripts/misc/minidump_stackwalk.sh | 125
-rwxr-xr-x  taskcluster/scripts/misc/repackage-jdk-centos.sh | 45
-rw-r--r--  taskcluster/scripts/tester/harness-test-linux.sh | 40
-rwxr-xr-x  taskcluster/scripts/tester/run-wizard | 170
-rw-r--r--  taskcluster/scripts/tester/test-b2g.sh | 118
-rw-r--r--  taskcluster/scripts/tester/test-macosx.sh | 77
-rw-r--r--  taskcluster/scripts/tester/test-ubuntu.sh | 188
-rw-r--r--  taskcluster/taskgraph/__init__.py | 0
-rw-r--r--  taskcluster/taskgraph/action.py | 68
-rw-r--r--  taskcluster/taskgraph/action.yml | 74
-rw-r--r--  taskcluster/taskgraph/create.py | 122
-rw-r--r--  taskcluster/taskgraph/decision.py | 181
-rw-r--r--  taskcluster/taskgraph/docker.py | 132
-rw-r--r--  taskcluster/taskgraph/files_changed.py | 65
-rw-r--r--  taskcluster/taskgraph/generator.py | 218
-rw-r--r--  taskcluster/taskgraph/graph.py | 117
-rw-r--r--  taskcluster/taskgraph/optimize.py | 156
-rw-r--r--  taskcluster/taskgraph/parameters.py | 72
-rw-r--r--  taskcluster/taskgraph/target_tasks.py | 121
-rw-r--r--  taskcluster/taskgraph/task/__init__.py | 0
-rw-r--r--  taskcluster/taskgraph/task/base.py | 108
-rw-r--r--  taskcluster/taskgraph/task/docker_image.py | 130
-rw-r--r--  taskcluster/taskgraph/task/post_build.py | 53
-rw-r--r--  taskcluster/taskgraph/task/signing.py | 64
-rw-r--r--  taskcluster/taskgraph/task/test.py | 112
-rw-r--r--  taskcluster/taskgraph/task/transform.py | 109
-rw-r--r--  taskcluster/taskgraph/taskgraph.py | 82
-rw-r--r--  taskcluster/taskgraph/test/__init__.py | 0
-rw-r--r--  taskcluster/taskgraph/test/automationrelevance.json | 425
-rw-r--r--  taskcluster/taskgraph/test/test_create.py | 76
-rw-r--r--  taskcluster/taskgraph/test/test_decision.py | 78
-rw-r--r--  taskcluster/taskgraph/test/test_files_changed.py | 73
-rw-r--r--  taskcluster/taskgraph/test/test_generator.py | 129
-rw-r--r--  taskcluster/taskgraph/test/test_graph.py | 157
-rw-r--r--  taskcluster/taskgraph/test/test_optimize.py | 256
-rw-r--r--  taskcluster/taskgraph/test/test_parameters.py | 62
-rw-r--r--  taskcluster/taskgraph/test/test_target_tasks.py | 81
-rw-r--r--  taskcluster/taskgraph/test/test_task_docker_image.py | 35
-rw-r--r--  taskcluster/taskgraph/test/test_taskgraph.py | 54
-rw-r--r--  taskcluster/taskgraph/test/test_transforms_base.py | 143
-rw-r--r--  taskcluster/taskgraph/test/test_try_option_syntax.py | 274
-rw-r--r--  taskcluster/taskgraph/test/test_util_attributes.py | 45
-rw-r--r--  taskcluster/taskgraph/test/test_util_docker.py | 194
-rw-r--r--  taskcluster/taskgraph/test/test_util_python_path.py | 31
-rwxr-xr-x  taskcluster/taskgraph/test/test_util_templates.py | 232
-rwxr-xr-x  taskcluster/taskgraph/test/test_util_time.py | 57
-rw-r--r--  taskcluster/taskgraph/test/test_util_treeherder.py | 23
-rw-r--r--  taskcluster/taskgraph/test/test_util_yaml.py | 23
-rw-r--r--  taskcluster/taskgraph/test/util.py | 24
-rw-r--r--  taskcluster/taskgraph/transforms/__init__.py | 0
-rw-r--r--  taskcluster/taskgraph/transforms/android_stuff.py | 46
-rw-r--r--  taskcluster/taskgraph/transforms/base.py | 126
-rw-r--r--  taskcluster/taskgraph/transforms/build.py | 31
-rw-r--r--  taskcluster/taskgraph/transforms/build_attrs.py | 33
-rw-r--r--  taskcluster/taskgraph/transforms/gecko_v2_whitelist.py | 77
-rw-r--r--  taskcluster/taskgraph/transforms/job/__init__.py | 164
-rw-r--r--  taskcluster/taskgraph/transforms/job/common.py | 108
-rw-r--r--  taskcluster/taskgraph/transforms/job/hazard.py | 91
-rw-r--r--  taskcluster/taskgraph/transforms/job/mach.py | 30
-rw-r--r--  taskcluster/taskgraph/transforms/job/mozharness.py | 226
-rw-r--r--  taskcluster/taskgraph/transforms/job/run_task.py | 59
-rw-r--r--  taskcluster/taskgraph/transforms/job/spidermonkey.py | 86
-rw-r--r--  taskcluster/taskgraph/transforms/job/toolchain.py | 115
-rw-r--r--  taskcluster/taskgraph/transforms/l10n.py | 44
-rw-r--r--  taskcluster/taskgraph/transforms/marionette_harness.py | 37
-rw-r--r--  taskcluster/taskgraph/transforms/task.py | 648
-rw-r--r--  taskcluster/taskgraph/transforms/tests/__init__.py | 0
-rw-r--r--  taskcluster/taskgraph/transforms/tests/all_kinds.py | 137
-rw-r--r--  taskcluster/taskgraph/transforms/tests/android_test.py | 42
-rw-r--r--  taskcluster/taskgraph/transforms/tests/desktop_test.py | 118
-rw-r--r--  taskcluster/taskgraph/transforms/tests/make_task_description.py | 445
-rw-r--r--  taskcluster/taskgraph/transforms/tests/test_description.py | 235
-rw-r--r--  taskcluster/taskgraph/transforms/upload_symbols.py | 36
-rw-r--r--  taskcluster/taskgraph/try_option_syntax.py | 559
-rw-r--r--  taskcluster/taskgraph/util/__init__.py | 0
-rw-r--r--  taskcluster/taskgraph/util/attributes.py | 26
-rw-r--r--  taskcluster/taskgraph/util/docker.py | 160
-rw-r--r--  taskcluster/taskgraph/util/python_path.py | 27
-rw-r--r--  taskcluster/taskgraph/util/seta.py | 85
-rw-r--r--  taskcluster/taskgraph/util/templates.py | 155
-rw-r--r--  taskcluster/taskgraph/util/time.py | 114
-rw-r--r--  taskcluster/taskgraph/util/treeherder.py | 24
-rw-r--r--  taskcluster/taskgraph/util/yaml.py | 16
143 files changed, 14228 insertions, 0 deletions
diff --git a/taskcluster/ci/artifact-build/kind.yml b/taskcluster/ci/artifact-build/kind.yml
new file mode 100644
index 000000000..46e522ff5
--- /dev/null
+++ b/taskcluster/ci/artifact-build/kind.yml
@@ -0,0 +1,39 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+jobs:
+ linux64-artifact/opt:
+ description: "Linux64 Opt Artifact Build"
+ index:
+ product: firefox
+ job-name: linux64-artifact-opt
+ treeherder:
+ platform: linux64/opt
+ kind: build
+ symbol: AB
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build generate-build-stats]
+ config:
+ - builds/releng_sub_linux_configs/64_artifact.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+ keep-artifacts: false
+
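Every kind.yml added in this commit follows the same three-part shape: an `implementation` line naming the Python class that loads the kind, an optional `transforms` chain applied to each job description in order, and the job definitions themselves. A minimal sketch of that shape — the job label `example-build/opt` and its values are hypothetical, assembled from patterns in the files below, not part of this commit:

    implementation: taskgraph.task.transform:TransformTask
    transforms:
        - taskgraph.transforms.job:transforms
        - taskgraph.transforms.task:transforms
    jobs:
        example-build/opt:            # hypothetical label, for illustration only
            description: "Example build"
            worker-type: aws-provisioner-v1/gecko-{level}-b-linux
            worker:
                implementation: docker-worker
                max-run-time: 3600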
diff --git a/taskcluster/ci/build-signing/kind.yml b/taskcluster/ci/build-signing/kind.yml
new file mode 100644
index 000000000..89d6e0220
--- /dev/null
+++ b/taskcluster/ci/build-signing/kind.yml
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: 'taskgraph.task.signing:SigningTask'
+
+kind-dependencies:
+ - build
diff --git a/taskcluster/ci/build/android.yml b/taskcluster/ci/build/android.yml
new file mode 100644
index 000000000..74088e3d3
--- /dev/null
+++ b/taskcluster/ci/build/android.yml
@@ -0,0 +1,137 @@
+android-api-15/debug:
+ description: "Android 4.0 API15+ Debug"
+ index:
+ product: mobile
+ job-name:
+ buildbot: android-api-15-debug
+ gecko-v2: android-api-15-debug
+ treeherder:
+ platform: android-4-0-armv7-api15/debug
+ symbol: tc(B)
+ worker-type: aws-provisioner-v1/gecko-{level}-b-android
+ worker:
+ implementation: docker-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ actions: [get-secrets build multi-l10n update]
+ config:
+ - builds/releng_base_android_64_builds.py
+ - disable_signing.py
+ - platform_supports_post_upload_to_latest.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ custom-build-variant-cfg: api-15-debug
+ tooltool-downloads: internal
+
+android-x86/opt:
+ description: "Android 4.2 x86 Opt"
+ index:
+ product: mobile
+ job-name: android-x86-opt
+ treeherder:
+ platform: android-4-2-x86/opt
+ symbol: tc(B)
+ tier: 1
+ worker-type: aws-provisioner-v1/gecko-{level}-b-android
+ worker:
+ implementation: docker-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ actions: [get-secrets build multi-l10n update]
+ config:
+ - builds/releng_base_android_64_builds.py
+ - disable_signing.py
+ - platform_supports_post_upload_to_latest.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ custom-build-variant-cfg: x86
+ tooltool-downloads: internal
+
+android-api-15/opt:
+ description: "Android 4.0 API15+ Opt"
+ index:
+ product: mobile
+ job-name: android-api-15-opt
+ treeherder:
+ platform: android-4-0-armv7-api15/opt
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-android
+ worker:
+ implementation: docker-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ actions: [get-secrets build multi-l10n update]
+ config:
+ - builds/releng_base_android_64_builds.py
+ - disable_signing.py
+ - platform_supports_post_upload_to_latest.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ custom-build-variant-cfg: api-15
+ tooltool-downloads: internal
+
+android-api-15-nightly/opt:
+ description: "Android 4.0 API15+ Nightly"
+ attributes:
+ nightly: true
+ index:
+ product: mobile
+ job-name: android-api-15-nightly-opt
+ treeherder:
+ platform: android-4-0-armv7-api15/opt
+ symbol: tc(N)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-android
+ worker:
+ implementation: docker-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ actions: [get-secrets build multi-l10n update]
+ config:
+ - builds/releng_base_android_64_builds.py
+ - disable_signing.py
+ - platform_supports_post_upload_to_latest.py
+ - taskcluster_nightly.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ custom-build-variant-cfg: api-15
+ tooltool-downloads: internal
+ run-on-projects: []
+
+android-api-15-gradle/opt:
+ description: "Android 4.0 API15+ (Gradle) Opt"
+ index:
+ product: mobile
+ job-name: android-api-15-gradle-opt
+ treeherder:
+ platform: android-4-0-armv7-api15/opt
+ symbol: tc(Bg)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-android
+ worker:
+ implementation: docker-worker
+ max-run-time: 7200
+ env:
+ # Bug 1292762 - Set GRADLE_USER_HOME to avoid sdk-manager-plugin intermittent
+ GRADLE_USER_HOME: /home/worker/workspace/build/src/dotgradle
+ artifacts:
+ - name: public/android/maven
+ path: /home/worker/workspace/build/src/obj-firefox/gradle/build/mobile/android/geckoview/maven/
+ type: directory
+ - name: public/android/geckoview_example.apk
+ path: /home/worker/workspace/build/src/obj-firefox/gradle/build/mobile/android/geckoview_example/outputs/apk/geckoview_example-withGeckoBinaries.apk
+ type: file
+ - name: public/build
+ path: /home/worker/artifacts/
+ type: directory
+ run:
+ using: mozharness
+ actions: [get-secrets build multi-l10n update]
+ config:
+ - builds/releng_base_android_64_builds.py
+ - disable_signing.py
+ - platform_supports_post_upload_to_latest.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ custom-build-variant-cfg: api-15-gradle
+ tooltool-downloads: internal
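The `{level}` in each `worker-type` above is not literal: it is substituted from the decision task's parameters with the repository's SCM level, so one definition serves trees at every level. Assuming a level-3 repository, the android builder worker type resolves as:

    # as written in this file:
    worker-type: aws-provisioner-v1/gecko-{level}-b-android
    # after substitution with parameters {level: '3'} (illustrative):
    worker-type: aws-provisioner-v1/gecko-3-b-android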
diff --git a/taskcluster/ci/build/kind.yml b/taskcluster/ci/build/kind.yml
new file mode 100644
index 000000000..acb8548c9
--- /dev/null
+++ b/taskcluster/ci/build/kind.yml
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build:transforms
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+jobs-from:
+ - linux.yml
+ - macosx.yml
+ - windows.yml
diff --git a/taskcluster/ci/build/linux.yml b/taskcluster/ci/build/linux.yml
new file mode 100644
index 000000000..7143522c8
--- /dev/null
+++ b/taskcluster/ci/build/linux.yml
@@ -0,0 +1,254 @@
+linux64/opt:
+ description: "Linux64 Opt"
+ index:
+ product: firefox
+ job-name: linux64-opt
+ treeherder:
+ platform: linux64/opt
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux64/pgo:
+ description: "Linux64 PGO"
+ index:
+ product: firefox
+ job-name:
+ buildbot: linux64-pgo
+ gecko-v2: linux64-pgo
+ treeherder:
+ platform: linux64/pgo
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ coalesce-name: linux64-pgo
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ options: [enable-pgo]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux64/debug:
+ description: "Linux64 Debug"
+ index:
+ product: firefox
+ job-name:
+ buildbot: linux64-debug
+ gecko-v2: linux64-debug
+ treeherder:
+ platform: linux64/debug
+ symbol: tc(B)
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: debug
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux/opt:
+ description: "Linux32 Opt"
+ index:
+ product: firefox
+ job-name: linux-opt
+ treeherder:
+ platform: linux32/opt
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ coalesce-name: opt_linux32
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_32_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux/debug:
+ description: "Linux32 Debug"
+ index:
+ product: firefox
+ job-name: linux-debug
+ treeherder:
+ platform: linux32/debug
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ coalesce-name: dbg_linux32
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_32_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: debug
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux/pgo:
+ description: "Linux32 PGO"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: linux-pgo
+ treeherder:
+ platform: linux32/pgo
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ coalesce-name: linux32-pgo
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ options: [enable-pgo]
+ config:
+ - builds/releng_base_linux_32_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux64-asan/opt:
+ description: "Linux64 Opt ASAN"
+ index:
+ product: firefox
+ job-name: linux64-asan-opt
+ treeherder:
+ platform: linux64/asan
+ symbol: tc(Bo)
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: asan-tc
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux64-asan/debug:
+ description: "Linux64 Debug ASAN"
+ index:
+ product: firefox
+ job-name: linux64-asan-debug
+ treeherder:
+ platform: linux64/asan
+ symbol: tc(Bd)
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: asan-tc-and-debug
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux64-jsdcov/opt:
+ description: "Linux64-JSDCov Opt"
+ index:
+ product: firefox
+ job-name: linux64-jsdcov-opt
+ treeherder:
+ platform: linux64/jsdcov
+ symbol: tc(B)
+ tier: 2
+ run-on-projects: [ ]
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+
+linux64-ccov/opt:
+ description: "Linux64-CCov Opt"
+ index:
+ product: firefox
+ job-name: linux64-ccov-opt
+ treeherder:
+ platform: linux64/ccov
+ symbol: tc(B)
+ tier: 2
+ run-on-projects: [ ]
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build check-test generate-build-stats update]
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: code-coverage
+ tooltool-downloads: public
+ need-xvfb: true
diff --git a/taskcluster/ci/build/macosx.yml b/taskcluster/ci/build/macosx.yml
new file mode 100644
index 000000000..b8d03669c
--- /dev/null
+++ b/taskcluster/ci/build/macosx.yml
@@ -0,0 +1,71 @@
+macosx64/debug:
+ description: "MacOS X x64 Cross-compile"
+ index:
+ product: firefox
+ job-name: macosx64-debug
+ treeherder:
+ platform: osx-10-7/debug
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-macosx64
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build generate-build-stats update]
+ config:
+ - builds/releng_base_mac_64_cross_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: cross-debug
+ tooltool-downloads: internal
+
+macosx64/opt:
+ description: "MacOS X x64 Cross-compile"
+ index:
+ product: firefox
+ job-name: macosx64-opt
+ treeherder:
+ platform: osx-10-7/opt
+ symbol: tc(B)
+ tier: 2
+ run-on-projects: [ ]
+ worker-type: aws-provisioner-v1/gecko-{level}-b-macosx64
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build generate-build-stats update]
+ config:
+ - builds/releng_base_mac_64_cross_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: internal
+
+macosx64-universal/opt:
+ description: "MacOS X Universal Cross-compile"
+ index:
+ product: firefox
+ job-name: macosx64-opt
+ treeherder:
+ platform: osx-10-7/opt
+ symbol: tc(Bu)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-macosx64
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ run:
+ using: mozharness
+ actions: [get-secrets build generate-build-stats update]
+ config:
+ - builds/releng_base_mac_64_cross_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ custom-build-variant-cfg: cross-universal
+ tooltool-downloads: internal
diff --git a/taskcluster/ci/build/windows.yml b/taskcluster/ci/build/windows.yml
new file mode 100644
index 000000000..a3211219c
--- /dev/null
+++ b/taskcluster/ci/build/windows.yml
@@ -0,0 +1,122 @@
+win32/debug:
+ description: "Win32 Debug"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: win32-debug
+ treeherder:
+ platform: windows2012-32/debug
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ script: mozharness/scripts/fx_desktop_build.py
+ config:
+ - builds/taskcluster_firefox_win32_debug.py
+
+win32/opt:
+ description: "Win32 Opt"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: win32-opt
+ treeherder:
+ platform: windows2012-32/opt
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ script: mozharness/scripts/fx_desktop_build.py
+ config:
+ - builds/taskcluster_firefox_win32_opt.py
+
+win32/pgo:
+ description: "Win32 Opt PGO"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: win32-pgo
+ treeherder:
+ platform: windows2012-32/pgo
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 9000
+ run:
+ using: mozharness
+ options: [enable-pgo]
+ script: mozharness/scripts/fx_desktop_build.py
+ config:
+ - builds/taskcluster_firefox_win32_opt.py
+
+win64/debug:
+ description: "Win64 Debug"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: win64-debug
+ treeherder:
+ platform: windows2012-64/debug
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ script: mozharness/scripts/fx_desktop_build.py
+ config:
+ - builds/taskcluster_firefox_win64_debug.py
+
+win64/opt:
+ description: "Win64 Opt"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: win64-opt
+ treeherder:
+ platform: windows2012-64/opt
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 7200
+ run:
+ using: mozharness
+ script: mozharness/scripts/fx_desktop_build.py
+ config:
+ - builds/taskcluster_firefox_win64_opt.py
+
+win64/pgo:
+ description: "Win64 Opt PGO"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: win64-pgo
+ treeherder:
+ platform: windows2012-64/pgo
+ symbol: tc(B)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 10800
+ run:
+ using: mozharness
+ options: [enable-pgo]
+ script: mozharness/scripts/fx_desktop_build.py
+ config:
+ - builds/taskcluster_firefox_win64_opt.py
+
diff --git a/taskcluster/ci/desktop-test/kind.yml b/taskcluster/ci/desktop-test/kind.yml
new file mode 100644
index 000000000..31e305cbe
--- /dev/null
+++ b/taskcluster/ci/desktop-test/kind.yml
@@ -0,0 +1,12 @@
+implementation: taskgraph.task.test:TestTask
+
+kind-dependencies:
+ - build
+
+transforms:
+ - taskgraph.transforms.tests.test_description:validate
+ - taskgraph.transforms.tests.desktop_test:transforms
+ - taskgraph.transforms.tests.all_kinds:transforms
+ - taskgraph.transforms.tests.test_description:validate
+ - taskgraph.transforms.tests.make_task_description:transforms
+ - taskgraph.transforms.task:transforms
diff --git a/taskcluster/ci/desktop-test/test-platforms.yml b/taskcluster/ci/desktop-test/test-platforms.yml
new file mode 100644
index 000000000..b03d3c41d
--- /dev/null
+++ b/taskcluster/ci/desktop-test/test-platforms.yml
@@ -0,0 +1,73 @@
+# This file maps build platforms to test platforms. In some cases, a
+# single build may be tested on multiple test platforms, but a single test
+# platform can only link to one build platform. Both build and test platforms
+# are represented as <platform>/<type>, where <type> is what Treeherder calls a
+# collection.
+#
+# Each test platform further specifies the set of tests that will be scheduled
+# for the platform, referring to tests defined in test-sets.yml.
+#
+# Note that the test set does not depend on the tree; tree-dependent job
+# selection should be performed in the target task selection phase of
+# task-graph generation.
+
+linux64/debug:
+ build-platform: linux64/debug
+ test-set: all-tests
+linux64/opt:
+ build-platform: linux64/opt
+ test-set: all-tests
+
+# TODO: use 'pgo' and 'asan' labels here, instead of -pgo/opt
+linux64-pgo/opt:
+ build-platform: linux64-pgo/opt
+ test-set: all-tests
+linux64-asan/opt:
+ build-platform: linux64-asan/opt
+ test-set: asan-tests
+
+linux64-ccov/opt:
+ build-platform: linux64-ccov/opt
+ test-set: ccov-code-coverage-tests
+linux64-jsdcov/opt:
+ build-platform: linux64-jsdcov/opt
+ test-set: jsdcov-code-coverage-tests
+
+# win32 vm
+windows7-32-vm/debug:
+ build-platform: win32/debug
+ test-set: windows-vm-tests
+windows7-32-vm/opt:
+ build-platform: win32/opt
+ test-set: windows-vm-tests
+
+# win32 gpu
+#windows7-32/debug:
+# build-platform: win32/debug
+# test-set: windows-gpu-tests
+#windows7-32/opt:
+# build-platform: win32/opt
+# test-set: windows-gpu-tests
+
+# win64 vm
+windows10-64-vm/debug:
+ build-platform: win64/debug
+ test-set: windows-vm-tests
+windows10-64-vm/opt:
+ build-platform: win64/opt
+ test-set: windows-vm-tests
+
+# win64 gpu
+#windows10-64/debug:
+# build-platform: win64/debug
+# test-set: windows-gpu-tests
+#windows10-64/opt:
+# build-platform: win64/opt
+# test-set: windows-gpu-tests
+
+# macosx64/debug:
+# build-platform: macosx64/debug
+# test-set: macosx64-tests
+# macosx64/opt:
+# build-platform: macosx64/opt
+# test-set: macosx64-tests
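Per the header comment, each uncommented entry above keys one test platform to exactly one build platform plus one test set. Adding a platform is a single stanza; a hypothetical example (the platform name is illustrative and not part of this commit):

    linux64-example/opt:
        build-platform: linux64/opt    # must name an existing build platform
        test-set: all-tests            # must name a set in test-sets.yml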
diff --git a/taskcluster/ci/desktop-test/test-sets.yml b/taskcluster/ci/desktop-test/test-sets.yml
new file mode 100644
index 000000000..da1ea6263
--- /dev/null
+++ b/taskcluster/ci/desktop-test/test-sets.yml
@@ -0,0 +1,118 @@
+# Each key in this file specifies a set of tests to run. Different test sets
+# may, for example, be bound to different test platforms.
+#
+# Note that the test set does not depend on the tree; tree-dependent job
+# selection should be performed in the target task selection phase of
+# task-graph generation.
+#
+# A test set has a name, and a list of tests that it contains.
+#
+# Test names given here reference tests.yml.
+
+all-tests:
+ - cppunit
+ - crashtest
+ - firefox-ui-functional-local
+ - firefox-ui-functional-remote
+ - gtest
+ - jittest
+ - jsreftest
+ - marionette
+ - mochitest
+ - mochitest-a11y
+ - mochitest-browser-chrome
+ - mochitest-chrome
+ - mochitest-clipboard
+ - mochitest-devtools-chrome
+ - mochitest-gpu
+ - mochitest-jetpack
+ - mochitest-media
+ - mochitest-webgl
+ - reftest
+ - reftest-no-accel
+ - web-platform-tests
+ - web-platform-tests-reftests
+ - web-platform-tests-wdspec
+ - xpcshell
+
+asan-tests:
+ - cppunit
+ - crashtest
+ - firefox-ui-functional-local
+ - firefox-ui-functional-remote
+ - gtest
+ - jittest
+ - jsreftest
+ - marionette
+ - mochitest
+ - mochitest-a11y
+ - mochitest-browser-chrome
+ - mochitest-chrome
+ - mochitest-clipboard
+ - mochitest-devtools-chrome
+ - mochitest-gpu
+ - mochitest-jetpack
+ - mochitest-media
+ - mochitest-webgl
+ - reftest
+ - reftest-no-accel
+ - xpcshell
+
+windows-vm-tests:
+ - cppunit
+ #- crashtest
+ - external-media-tests
+ #- gtest
+ #- jittest
+ #- jsreftest
+ #- marionette
+ #- mochitest
+ #- mochitest-browser-chrome
+ #- mochitest-devtools-chrome
+ #- mochitest-jetpack
+ #- mochitest-media
+ #- web-platform-tests
+ #- web-platform-tests-reftests
+ #- xpcshell
+
+# windows-gpu-tests:
+# - reftest
+# - reftest-no-accel
+# - mochitest-webgl
+
+# these tests currently run on hardware, but may migrate above when validated
+# note: on win, mochitest-a11y and mochitest-chrome come under mochitest-other
+# windows-hw-tests:
+# - mochitest-clipboard
+# - mochitest-gpu
+# - mochitest-other
+
+ccov-code-coverage-tests:
+ - mochitest
+ - mochitest-browser-chrome
+ - mochitest-devtools-chrome
+ - xpcshell
+
+jsdcov-code-coverage-tests:
+ - mochitest-browser-chrome
+ - mochitest-devtools-chrome
+
+macosx64-tests:
+ - cppunit
+ - crashtest
+ # - gtest
+ - jsreftest
+ # - marionette
+ # - mochitest
+ # - mochitest-browser-chrome
+ # - mochitest-clipboard
+ # - mochitest-devtools-chrome
+ # - mochitest-gpu
+ # - mochitest-jetpack
+ # - mochitest-media
+ # - mochitest-other
+ - mochitest-webgl
+ # - reftest
+ # - web-platform-tests
+ # - web-platform-tests-reftests
+ # - xpcshell
diff --git a/taskcluster/ci/desktop-test/tests.yml b/taskcluster/ci/desktop-test/tests.yml
new file mode 100644
index 000000000..edfb30909
--- /dev/null
+++ b/taskcluster/ci/desktop-test/tests.yml
@@ -0,0 +1,721 @@
+# Each stanza here describes a particular test suite or sub-suite. These are
+# processed through the transformations described in kind.yml to produce a
+# bunch of tasks. See the schema in `test_description.py` for a description
+# of the fields used here.
+
+# Note that these are in lexical order
+
+cppunit:
+ description: "CPP Unit Tests"
+ suite: cppunittest
+ treeherder-symbol: tc(Cpp)
+ e10s: false
+ run-on-projects:
+ by-test-platform:
+ windows.*: ['mozilla-central', 'try']
+ default: ['all']
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --cppunittest-suite=cppunittest
+
+crashtest:
+ description: "Crashtest run"
+ suite: reftest/crashtest
+ treeherder-symbol: tc-R(C)
+ docker-image: {"in-tree": "desktop1604-test"}
+ e10s:
+ by-test-platform:
+ # Bug 1304435
+ win.*: false
+ default: both
+ mozharness:
+ script: desktop_unittest.py
+ chunked: true
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --reftest-suite=crashtest
+
+external-media-tests:
+ description: "External Media Test run"
+ suite: external-media-tests
+ treeherder-symbol: tc-VP(b-m)
+ e10s: false
+ tier: 2
+ max-run-time: 5400
+ run-on-projects:
+ by-test-platform:
+ windows.*: ['mozilla-central', 'try']
+ default: ['all']
+ mozharness:
+ script: firefox_media_tests_taskcluster.py
+ config:
+ by-test-platform:
+ win.*:
+ - mediatests/taskcluster_windows_config.py
+ default:
+ - mediatests/taskcluster_posix_config.py
+ - remove_executables.py
+
+firefox-ui-functional-local:
+ description: "Firefox-ui-tests functional run"
+ suite: "firefox-ui/functional local"
+ treeherder-symbol: tc-Fxfn-l(en-US)
+ max-run-time: 5400
+ tier: 1
+ docker-image: {"in-tree": "desktop1604-test"}
+ mozharness:
+ script: firefox_ui_tests/functional.py
+ config:
+ - firefox_ui_tests/taskcluster.py
+ - remove_executables.py
+ extra-options:
+ - "--tag"
+ - "local"
+
+firefox-ui-functional-remote:
+ description: "Firefox-ui-tests functional run"
+ suite: "firefox-ui/functional remote"
+ treeherder-symbol: tc-Fxfn-r(en-US)
+ max-run-time: 5400
+ tier: 2
+ docker-image: {"in-tree": "desktop1604-test"}
+ mozharness:
+ script: firefox_ui_tests/functional.py
+ config:
+ - firefox_ui_tests/taskcluster.py
+ - remove_executables.py
+ extra-options:
+ - "--tag"
+ - "remote"
+
+gtest:
+ description: "GTests run"
+ suite: gtest
+ treeherder-symbol: tc(GTest)
+ e10s: false
+ instance-size: xlarge
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --gtest-suite=gtest
+
+jittest:
+ description: "JIT Test run"
+ suite: jittest/jittest-chunked
+ treeherder-symbol: tc(Jit)
+ e10s: false
+ chunks:
+ by-test-platform:
+ win.*: 1
+ default: 6
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --jittest-suite=jittest-chunked
+
+jsreftest:
+ description: "JS Reftest run"
+ suite: reftest/jsreftest
+ treeherder-symbol: tc-R(J)
+ chunks:
+ by-test-platform:
+ win.*: 1
+ default: 2
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --reftest-suite=jsreftest
+
+marionette:
+ description: "Marionette unittest run"
+ suite: marionette
+ treeherder-symbol: tc(Mn)
+ max-run-time: 5400
+ docker-image: {"in-tree": "desktop1604-test"}
+ mozharness:
+ script: marionette.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - marionette/windows_taskcluster_config.py
+ default:
+ - marionette/prod_config.py
+ - remove_executables.py
+
+mochitest:
+ description: "Mochitest plain run"
+ suite: mochitest/plain-chunked
+ treeherder-symbol: tc-M()
+ loopback-video: true
+ run-on-projects:
+ by-test-platform:
+ linux64-ccov/opt: []
+ default: ['all']
+ chunks:
+ by-test-platform:
+ macosx.*: 5
+ win.*: 5
+ default: 10
+ e10s:
+ by-test-platform:
+ linux64-ccov/opt: false
+ default: both
+ max-run-time: 5400
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ by-test-platform:
+ linux64-ccov/opt:
+ - --mochitest-suite=plain-chunked
+ - --code-coverage
+ default:
+ - --mochitest-suite=plain-chunked
+ # Bug 1281241: migrating to m3.large instances
+ instance-size: legacy
+ allow-software-gl-layers: false
+
+mochitest-a11y:
+ description: "Mochitest a11y run"
+ suite: mochitest/a11y
+ treeherder-symbol: tc-M(a11y)
+ loopback-video: true
+ e10s: false
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=a11y
+
+mochitest-browser-chrome:
+ description: "Mochitest browser-chrome run"
+ suite:
+ by-test-platform:
+ linux64-jsdcov/opt: mochitest/browser-chrome-coverage
+ default: mochitest/browser-chrome-chunked
+ treeherder-symbol: tc-M(bc)
+ loopback-video: true
+ run-on-projects:
+ by-test-platform:
+ linux64-jsdcov/opt: []
+ linux64-ccov/opt: []
+ default: ['all']
+ chunks:
+ by-test-platform:
+ linux64-jsdcov/opt: 35
+ default: 10
+ e10s:
+ by-test-platform:
+ linux64-jsdcov/opt: false
+ linux64-ccov/opt: false
+ default: both
+ max-run-time:
+ by-test-platform:
+ linux64-jsdcov/opt: 7200
+ linux64-ccov/opt: 7200
+ linux64/debug: 5400
+ default: 3600
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ by-test-platform:
+ linux64-jsdcov/opt:
+ - --mochitest-suite=browser-chrome-coverage
+ linux64-ccov/opt:
+ - --mochitest-suite=browser-chrome-chunked
+ - --code-coverage
+ default:
+ - --mochitest-suite=browser-chrome-chunked
+ # Bug 1281241: migrating to m3.large instances
+ instance-size:
+ by-test-platform:
+ linux64-jsdcov/opt: xlarge
+ linux64-ccov/opt: xlarge
+ default: legacy
+ allow-software-gl-layers: false
+
+mochitest-chrome:
+ description: "Mochitest chrome run"
+ suite: mochitest/chrome
+ treeherder-symbol: tc-M(c)
+ loopback-video: true
+ chunks:
+ by-test-platform:
+ macosx.*: 1
+ default: 3
+ e10s: false
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=chrome
+
+mochitest-clipboard:
+ description: "Mochitest clipboard run"
+ suite: mochitest/plain-clipboard,chrome-clipboard,browser-chrome-clipboard,jetpack-package-clipboard
+ treeherder-symbol: tc-M(cl)
+ loopback-video: true
+ instance-size: xlarge
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=plain-clipboard,chrome-clipboard,browser-chrome-clipboard,jetpack-package-clipboard
+
+mochitest-devtools-chrome:
+ description: "Mochitest devtools-chrome run"
+ suite:
+ by-test-platform:
+ linux64-jsdcov/opt: mochitest/mochitest-devtools-chrome-coverage
+ default: mochitest/mochitest-devtools-chrome-chunked
+ treeherder-symbol: tc-M(dt)
+ loopback-video: true
+ max-run-time: 5400
+ chunks:
+ by-test-platform:
+ win.*: 8
+ default: 10
+ run-on-projects:
+ by-test-platform:
+ linux64-ccov/opt: []
+ linux64-jsdcov/opt: []
+ windows.*: ['mozilla-central', 'try']
+ default: ['all']
+ e10s:
+ by-test-platform:
+ # Bug 1242986: linux64/debug mochitest-devtools-chrome e10s is not greened up yet
+ linux64/debug: false
+ linux64-ccov/opt: false
+ linux64-jsdcov/opt: false
+ default: both
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ by-test-platform:
+ linux64-ccov/opt:
+ - --mochitest-suite=mochitest-devtools-chrome-chunked
+ - --code-coverage
+                linux64-jsdcov/opt:
+ - --mochitest-suite=mochitest-devtools-chrome-coverage
+ default:
+ - --mochitest-suite=mochitest-devtools-chrome-chunked
+ instance-size:
+ by-test-platform:
+ # Bug 1281241: migrating to m3.large instances
+ linux64-asan/opt: legacy
+ default: default
+ # Bug 1296086: high number of intermittents observed with software GL and large instances
+ allow-software-gl-layers: false
+
+mochitest-gpu:
+ description: "Mochitest GPU run"
+ suite: mochitest/plain-gpu,chrome-gpu,browser-chrome-gpu
+ treeherder-symbol: tc-M(gpu)
+ loopback-video: true
+ e10s:
+ by-test-platform:
+ win.*: both
+ default: true
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=plain-gpu,chrome-gpu,browser-chrome-gpu
+
+mochitest-jetpack:
+ description: "Mochitest jetpack run"
+ suite: mochitest/jetpack-package
+ treeherder-symbol: tc-M(JP)
+ loopback-video: true
+ e10s: false
+ max-run-time: 5400
+ run-on-projects:
+ by-test-platform:
+ windows.*: ['mozilla-central', 'try']
+ default: ['all']
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=jetpack-package
+ - --mochitest-suite=jetpack-addon
+
+mochitest-media:
+ description: "Mochitest media run"
+ suite: mochitest/mochitest-media
+ treeherder-symbol: tc-M(mda)
+ max-run-time: 5400
+ loopback-video: true
+ instance-size: large
+ docker-image: {"in-tree": "desktop1604-test"}
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=mochitest-media
+
+mochitest-other:
+ description: "Mochitest other"
+ suite: mochitest/other
+ treeherder-symbol: tc-M(oth)
+ e10s: false
+ max-run-time: 5400
+ mozharness:
+ script: mozharness/scripts/desktop_unittest.py
+ no-read-buildbot-config: true
+ include-blob-upload-branch: true
+ chunked: true
+ config:
+ by-test-platform:
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=chrome,a11y
+
+mochitest-webgl:
+ description: "Mochitest webgl run"
+ suite: mochitest/mochitest-gl
+ treeherder-symbol: tc-M(gl)
+ chunks: 3
+ loopback-video: true
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ chunked: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --mochitest-suite=mochitest-gl
+ # Bug 1296733: llvmpipe with mesa 9.2.1 lacks thread safety
+ allow-software-gl-layers: false
+
+reftest:
+ description: "Reftest run"
+ suite: reftest/reftest
+ treeherder-symbol: tc-R(R)
+ docker-image: {"in-tree": "desktop1604-test"}
+ chunks:
+ by-test-platform:
+ macosx.*: 1
+ default: 8
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --reftest-suite=reftest
+
+reftest-no-accel:
+ description: "Reftest not accelerated run"
+ suite: reftest/reftest-no-accel
+ treeherder-symbol: tc-R(Ru)
+ docker-image: {"in-tree": "desktop1604-test"}
+ chunks:
+ by-test-platform:
+ macosx.*: 1
+ default: 8
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ - --reftest-suite=reftest-no-accel
+
+web-platform-tests:
+ description: "Web platform test run"
+ suite: web-platform-tests
+ treeherder-symbol: tc-W()
+ chunks:
+ by-test-platform:
+ macosx.*: 5
+ default: 12
+ max-run-time: 7200
+ instance-size: xlarge
+ docker-image: {"in-tree": "desktop1604-test"}
+ checkout: true
+ run-on-projects:
+ by-test-platform:
+ windows.*: ['mozilla-central', 'try']
+ default: ['all']
+ mozharness:
+ script: web_platform_tests.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - web_platform_tests/prod_config_windows_taskcluster.py
+ default:
+ - web_platform_tests/prod_config.py
+ - remove_executables.py
+ extra-options:
+ - --test-type=testharness
+
+web-platform-tests-reftests:
+ description: "Web platform reftest run"
+ suite: web-platform-tests-reftests
+ treeherder-symbol: tc-W(Wr)
+ max-run-time: 5400
+ instance-size: xlarge
+ docker-image: {"in-tree": "desktop1604-test"}
+ checkout: true
+ run-on-projects:
+ by-test-platform:
+ windows.*: ['mozilla-central', 'try']
+ default: ['all']
+ mozharness:
+ script: web_platform_tests.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - web_platform_tests/prod_config_windows_taskcluster.py
+ default:
+ - web_platform_tests/prod_config.py
+ - remove_executables.py
+ extra-options:
+ - --test-type=reftest
+
+web-platform-tests-wdspec:
+ description: "Web platform webdriver-spec run"
+ suite: web-platform-tests-wdspec
+ treeherder-symbol: tc-W(Wd)
+ max-run-time: 5400
+ instance-size: xlarge
+ docker-image: {"in-tree": "desktop1604-test"}
+ checkout: true
+ mozharness:
+ script: web_platform_tests.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - web_platform_tests/prod_config_windows_taskcluster.py
+ default:
+ - web_platform_tests/prod_config.py
+ - remove_executables.py
+ extra-options:
+ - --test-type=wdspec
+
+xpcshell:
+ description: "xpcshell test run"
+ suite: xpcshell
+ treeherder-symbol: tc-X()
+ run-on-projects:
+ by-test-platform:
+ linux64-ccov/opt: []
+ default: ['all']
+ chunks:
+ by-test-platform:
+ # win.*: 1
+ macosx.*: 1
+ linux64/debug: 10
+ default: 8
+ max-run-time: 5400
+ e10s: false
+ mozharness:
+ script: desktop_unittest.py
+ no-read-buildbot-config: true
+ config:
+ by-test-platform:
+ win.*:
+ - unittests/win_taskcluster_unittest.py
+ macosx.*:
+ - remove_executables.py
+ - unittests/mac_unittest.py
+ default:
+ - unittests/linux_unittest.py
+ - remove_executables.py
+ extra-options:
+ by-test-platform:
+ linux64-ccov/opt:
+ - --xpcshell-suite=xpcshell
+ - --code-coverage
+ default:
+ - --xpcshell-suite=xpcshell
+ # Bug 1281241: migrating to m3.large instances
+ instance-size: legacy
+ allow-software-gl-layers: false
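Most fields in the stanzas above may be written as a `by-test-platform` map instead of a plain value. During the transforms each such map collapses to a single value: keys are compared against the test platform, first as exact strings and then as regular expressions (hence entries like `win.*`), with `default` as the fallback. A sketch of one resolution, assuming the test platform being processed is `windows7-32-vm/debug`:

    chunks:
        by-test-platform:
            win.*: 1      # regex matches windows7-32-vm/debug, so chunks resolves to 1
            default: 6    # used only when no other key matches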
diff --git a/taskcluster/ci/docker-image/image.yml b/taskcluster/ci/docker-image/image.yml
new file mode 100644
index 000000000..f9a1113f0
--- /dev/null
+++ b/taskcluster/ci/docker-image/image.yml
@@ -0,0 +1,68 @@
+---
+task:
+ created:
+ relative-datestamp: "0 seconds"
+ deadline:
+ relative-datestamp: "24 hours"
+ metadata:
+ name: 'Docker Image Build: {{image_name}}'
+ description: 'Build the docker image {{image_name}} for use by dependent tasks'
+ source: '{{source}}'
+ owner: mozilla-taskcluster-maintenance@mozilla.com
+ tags:
+ createdForUser: '{{owner}}'
+
+ workerType: taskcluster-images
+ provisionerId: aws-provisioner-v1
+ schedulerId: task-graph-scheduler
+
+ routes:
+ # Indexing routes to avoid building the same image twice
+ - index.{{index_image_prefix}}.level-{{level}}.{{image_name}}.latest
+ - index.{{index_image_prefix}}.level-{{level}}.{{image_name}}.pushdate.{{year}}.{{month}}-{{day}}-{{pushtime}}
+ - index.{{index_image_prefix}}.level-{{level}}.{{image_name}}.hash.{{context_hash}}
+ # Treeherder routes
+ - tc-treeherder.v2.{{project}}.{{head_rev}}.{{pushlog_id}}
+ - tc-treeherder-stage.v2.{{project}}.{{head_rev}}.{{pushlog_id}}
+
+ scopes:
+ - secrets:get:project/taskcluster/gecko/hgfingerprint
+ - docker-worker:cache:level-{{level}}-imagebuilder-v1
+
+ payload:
+ env:
+ HASH: '{{context_hash}}'
+ PROJECT: '{{project}}'
+ CONTEXT_URL: '{{context_url}}'
+ IMAGE_NAME: '{{image_name}}'
+ GECKO_BASE_REPOSITORY: '{{base_repository}}'
+ GECKO_HEAD_REPOSITORY: '{{head_repository}}'
+ GECKO_HEAD_REV: '{{head_rev}}'
+ HG_STORE_PATH: '/home/worker/checkouts/hg-store'
+ cache:
+ 'level-{{level}}-imagebuilder-v1': '/home/worker/checkouts'
+ features:
+ dind: true
+ chainOfTrust: true
+ taskclusterProxy: true
+ image: '{{#docker_image}}image_builder{{/docker_image}}'
+ maxRunTime: 3600
+ artifacts:
+ '{{artifact_path}}':
+ type: 'file'
+ path: '/home/worker/workspace/artifacts/image.tar.zst'
+ expires:
+ relative-datestamp: "1 year"
+ extra:
+ imageMeta: # Useful when converting back from JSON in action tasks
+ level: '{{level}}'
+ contextHash: '{{context_hash}}'
+ imageName: '{{image_name}}'
+ treeherderEnv:
+ - staging
+ - production
+ treeherder:
+ jobKind: other
+ build:
+ platform: 'taskcluster-images'
+ groupSymbol: 'I'
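The three indexing routes above exist so a later run can locate this image instead of rebuilding it; the `hash.{{context_hash}}` route is keyed by a hash of the docker context directory, so an unchanged context means the existing `image.tar.zst` artifact can be reused. With placeholders filled in, the rendered routes look roughly like this (the index prefix, level, and hash below are illustrative, not taken from this commit):

    routes:
        - index.docker.images.v2.level-3.desktop-build.latest
        - index.docker.images.v2.level-3.desktop-build.hash.<sha256-of-context>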
diff --git a/taskcluster/ci/docker-image/kind.yml b/taskcluster/ci/docker-image/kind.yml
new file mode 100644
index 000000000..d1c118c8e
--- /dev/null
+++ b/taskcluster/ci/docker-image/kind.yml
@@ -0,0 +1,19 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: 'taskgraph.task.docker_image:DockerImageTask'
+images_path: '../../../testing/docker'
+
+# make a task for each docker-image we might want. For the moment, since we
+# write artifacts for each, these are whitelisted, but ideally that will change
+# (to use subdirectory clones of the proper directory), at which point we can
+# generate tasks for every docker image in the directory, secure in the
+# knowledge that unnecessary images will be omitted from the target task graph
+images:
+ desktop-test: dt
+ desktop1604-test: dt16t
+ desktop-build: db
+ tester: tst
+ lint: lnt
+ android-gradle-build: agb
diff --git a/taskcluster/ci/hazard/kind.yml b/taskcluster/ci/hazard/kind.yml
new file mode 100644
index 000000000..4849cdbd6
--- /dev/null
+++ b/taskcluster/ci/hazard/kind.yml
@@ -0,0 +1,58 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+job-defaults:
+ treeherder:
+ kind: build
+ tier: 1
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ docker-image: {in-tree: desktop-build}
+
+jobs:
+ linux64-shell-haz/debug:
+ description: "JS Shell Hazard Analysis Linux"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: shell-haz-debug
+ treeherder:
+ platform: linux64/debug
+ symbol: SM-tc(H)
+ run:
+ using: hazard
+ tooltool-manifest: "browser/config/tooltool-manifests/linux64/hazard.manifest"
+ command: >
+ cd /home/worker/checkouts/gecko/taskcluster/scripts/builder
+ && ./build-haz-linux.sh --project shell $HOME/workspace
+ when:
+ files-changed:
+ - js/public/**
+ - js/src/**
+
+ linux64-haz/debug:
+ description: "Browser Hazard Analysis Linux"
+ index:
+ product: firefox
+ job-name:
+ gecko-v2: browser-haz-debug
+ treeherder:
+ platform: linux64/debug
+ symbol: tc(H)
+ run:
+ using: hazard
+ tooltool-manifest: "browser/config/tooltool-manifests/linux64/hazard.manifest"
+ mozconfig: "browser/config/mozconfigs/linux64/hazards"
+ command: >
+ cd /home/worker/checkouts/gecko/taskcluster/scripts/builder
+ && ./build-haz-linux.sh --project browser $HOME/workspace
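The `when.files-changed` clauses above are an optimization rather than a trigger: during task-graph generation the files touched by the push are matched against the globs, and a task whose patterns all miss can be dropped from the graph. The shell-haz job, for example, survives only when SpiderMonkey sources change:

    when:
        files-changed:
            - js/public/**    # any changed file matching a glob keeps the task
            - js/src/**       # if nothing matches, the task is optimized away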
diff --git a/taskcluster/ci/l10n/kind.yml b/taskcluster/ci/l10n/kind.yml
new file mode 100644
index 000000000..dfd011e9a
--- /dev/null
+++ b/taskcluster/ci/l10n/kind.yml
@@ -0,0 +1,89 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# NOTE: please write a description of this kind in taskcluster/docs/kinds.rst
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.l10n:transforms
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+job-defaults:
+ index:
+ product: firefox
+ treeherder:
+ kind: build
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 36000
+
+ # don't run anywhere by default, but still available via try
+ run-on-projects: []
+ run:
+ # NOTE: this should really be a different "using" since it's using
+ # build-l10n.sh instead of build-linux.sh. Preferably, build-linux.sh
+ # and the mozharness run implementation should be modified to support
+ # the functionality that l10n needs
+ using: mozharness
+ job-script: taskcluster/scripts/builder/build-l10n.sh
+ when:
+ files-changed:
+ - browser/locales/all-locales
+ - python/compare-locales/**
+ - testing/mozharness/configs/single_locale/**
+ - testing/mozharness/mozharness/mozilla/l10n/locales.py
+ - testing/mozharness/scripts/desktop_l10n.py
+ - toolkit/locales/**
+ - toolkit/mozapps/installer/**
+
+jobs:
+ linux-l10n/opt:
+ description: "Localization"
+ index:
+ job-name:
+ gecko-v2: linux32-l10n-opt
+ treeherder:
+ platform: linux32/opt
+ symbol: tc(L10n)
+ run:
+ script: mozharness/scripts/desktop_l10n.py
+ actions: [clone-locales list-locales setup repack summary]
+ config:
+ - single_locale/tc_linux32.py
+ options:
+ - environment-config=single_locale/production.py
+ - branch-config=single_locale/{project}.py
+ - platform-config=single_locale/linux32.py
+ - total-chunks=1
+ - this-chunk=1
+ tooltool-downloads: public
+ need-xvfb: true
+
+ linux64-l10n/opt:
+ description: "Localization"
+ index:
+ job-name:
+ gecko-v2: linux64-l10n-opt
+ treeherder:
+ platform: linux64/opt
+ symbol: tc(L10n)
+ run:
+ script: mozharness/scripts/desktop_l10n.py
+ actions: [clone-locales list-locales setup repack summary]
+ config:
+ - single_locale/tc_linux64.py
+ options:
+ - environment-config=single_locale/production.py
+ - branch-config=single_locale/{project}.py
+ - platform-config=single_locale/linux64.py
+ - total-chunks=1
+ - this-chunk=1
+ tooltool-downloads: public
+ need-xvfb: true
diff --git a/taskcluster/ci/marionette-harness/kind.yml b/taskcluster/ci/marionette-harness/kind.yml
new file mode 100644
index 000000000..8ec8189b9
--- /dev/null
+++ b/taskcluster/ci/marionette-harness/kind.yml
@@ -0,0 +1,51 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# NOTE: please write a description of this kind in taskcluster/docs/kinds.rst
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.marionette_harness:transforms
+ - taskgraph.transforms.task:transforms
+
+# NOTE: this task should be refactored so that it is invoked as a job either
+# with a run.using of "mozharness", and combined with the source-check kind.
+
+jobs:
+ marionette-harness/opt:
+ description: "Marionette harness unit test"
+ attributes:
+ build_platform: marionette-harness
+ build_type: opt
+ treeherder:
+ platform: linux64/opt
+ kind: test
+ tier: 2
+ symbol: tc(Mn-h)
+ worker-type: aws-provisioner-v1/gecko-t-linux-xlarge
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build} # NOTE: better to use the lint image
+ env:
+ JOB_SCRIPT: "taskcluster/scripts/tester/harness-test-linux.sh"
+ MOZHARNESS_SCRIPT: "testing/mozharness/scripts/marionette_harness_tests.py"
+ TOOLS_DISABLE: "true"
+ TOOLTOOL_REPO: "https://github.com/mozilla/build-tooltool"
+ TOOLTOOL_REV: "master"
+ artifacts:
+ - name: public/logs/
+ path: /home/worker/workspace/mozharness_workspace/upload/logs/
+ type: directory
+ command:
+ - "bash"
+ - "/home/worker/bin/build.sh"
+ - "--tests=testing/marionette/harness/marionette_harness/tests/harness_unit"
+ - "--work-dir=mozharness_workspace"
+ max-run-time: 1800
+ when:
+ files-changed:
+ - "testing/marionette/harness/**"
+ - "testing/mozharness/scripts/marionette_harness_tests.py"
+ - "testing/config/marionette_harness_test_requirements.txt"
diff --git a/taskcluster/ci/source-check/doc.yml b/taskcluster/ci/source-check/doc.yml
new file mode 100644
index 000000000..0ab91c518
--- /dev/null
+++ b/taskcluster/ci/source-check/doc.yml
@@ -0,0 +1,32 @@
+sphinx/opt:
+ description: Generate the Sphinx documentation
+ treeherder:
+ symbol: tc(Doc)
+ kind: test
+ tier: 1
+ platform: lint/opt
+ worker-type: aws-provisioner-v1/b2gtest
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: "lint"}
+ max-run-time: 1800
+ artifacts:
+ - type: file
+ name: public/docs.tar.gz
+ path: /home/worker/checkouts/gecko/docs.tar.gz
+ run:
+ using: run-task
+ command: >
+ cd /home/worker/checkouts/gecko &&
+ ./mach doc --outdir docs-out --no-open &&
+ rm -rf docs-out/html/Mozilla_Source_Tree_Docs/_venv &&
+ mv docs-out/html/Mozilla_Source_Tree_Docs docs &&
+ tar -czf docs.tar.gz docs
+ run-on-projects:
+ - integration
+ - release
+ when:
+ files-changed:
+ - '**/*.py'
+ - '**/*.rst'
+ - 'tools/docs/**'
diff --git a/taskcluster/ci/source-check/kind.yml b/taskcluster/ci/source-check/kind.yml
new file mode 100644
index 000000000..6bc2b4b83
--- /dev/null
+++ b/taskcluster/ci/source-check/kind.yml
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+jobs-from:
+ - python-tests.yml
+ - mozlint.yml
+ - doc.yml
diff --git a/taskcluster/ci/source-check/mozlint.yml b/taskcluster/ci/source-check/mozlint.yml
new file mode 100644
index 000000000..fd22a9f8f
--- /dev/null
+++ b/taskcluster/ci/source-check/mozlint.yml
@@ -0,0 +1,97 @@
+mozlint-eslint/opt:
+ description: JS lint check
+ treeherder:
+ symbol: ES
+ kind: test
+ tier: 1
+ platform: lint/opt
+ worker-type: aws-provisioner-v1/b2gtest
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: "lint"}
+ max-run-time: 1800
+ run:
+ using: run-task
+ command: >
+ cd /home/worker/checkouts/gecko/tools/lint/eslint &&
+ /build/tooltool.py fetch -m manifest.tt &&
+ tar xvfz eslint.tar.gz &&
+ rm eslint.tar.gz &&
+ ln -s ../eslint-plugin-mozilla node_modules &&
+ cd ../../.. &&
+ ./mach lint -l eslint -f treeherder --quiet
+ run-on-projects:
+ - integration
+ - release
+ when:
+ files-changed:
+ # Files that are likely audited.
+ - '**/*.js'
+ - '**/*.jsm'
+ - '**/*.jsx'
+ - '**/*.html'
+ - '**/*.xhtml'
+ - '**/*.xml'
+ # Run when eslint policies change.
+ - '**/.eslintignore'
+ - '**/*eslintrc*'
+ # The plugin implementing custom checks.
+ - 'tools/lint/eslint/eslint-plugin-mozilla/**'
+ # Other misc lint related files.
+ - 'python/mozlint/**'
+ - 'tools/lint/**'
+ - 'testing/docker/lint/**'
+
+mozlint-flake8/opt:
+ description: flake8 run over the gecko codebase
+ treeherder:
+ symbol: f8
+ kind: test
+ tier: 1
+ platform: lint/opt
+ worker-type: aws-provisioner-v1/b2gtest
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: "lint"}
+ max-run-time: 1800
+ run:
+ using: mach
+ mach: lint -l flake8 -f treeherder
+ run-on-projects:
+ - integration
+ - release
+ when:
+ files-changed:
+ - '**/*.py'
+ - '**/.flake8'
+ - 'python/mozlint/**'
+ - 'tools/lint/**'
+ - 'testing/docker/lint/**'
+
+wptlint-gecko/opt:
+ description: web-platform-tests linter
+ treeherder:
+ symbol: W
+ kind: test
+ tier: 1
+ platform: lint/opt
+ worker-type: aws-provisioner-v1/b2gtest
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: "lint"}
+ max-run-time: 1800
+ run:
+ using: mach
+ mach: lint -l wpt -l wpt_manifest -f treeherder
+ run-on-projects:
+ - integration
+ - release
+ when:
+ files-changed:
+ - 'testing/web-platform/tests/**'
+ - 'testing/web-platform/mozilla/tests/**'
+ - 'testing/web-platform/meta/MANIFEST.json'
+ - 'testing/web-platform/mozilla/meta/MANIFEST.json'
+ - 'python/mozlint/**'
+ - 'tools/lint/**'
+ - 'testing/docker/lint/**'
diff --git a/taskcluster/ci/source-check/python-tests.yml b/taskcluster/ci/source-check/python-tests.yml
new file mode 100644
index 000000000..2f580f251
--- /dev/null
+++ b/taskcluster/ci/source-check/python-tests.yml
@@ -0,0 +1,49 @@
+taskgraph-tests/opt:
+ description: taskcluster/taskgraph unit tests
+ treeherder:
+ symbol: tg
+ kind: test
+ tier: 2
+ platform: linux64/opt
+ worker-type: aws-provisioner-v1/b2gtest
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: "lint"}
+ max-run-time: 1800
+ run:
+ using: mach
+ mach: taskgraph python-tests
+ run-on-projects:
+ - integration
+ - release
+ when:
+ files-changed:
+ - 'taskcluster/**/*.py'
+ - 'config/mozunit.py'
+ - 'python/mach/**/*.py'
+
+mozharness/opt:
+ description: mozharness integration tests
+ treeherder:
+ symbol: MH
+ kind: test
+ tier: 2
+ platform: lint/opt
+ worker-type: aws-provisioner-v1/b2gtest
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: "lint"}
+ max-run-time: 1800
+ run:
+ using: run-task
+ cache-dotcache: true
+ command: >
+ cd /home/worker/checkouts/gecko/testing/mozharness &&
+ /usr/bin/pip2 install tox &&
+ /home/worker/.local/bin/tox -e py27-hg3.7
+ run-on-projects:
+ - integration
+ - release
+ when:
+ files-changed:
+ - 'testing/mozharness/**'
diff --git a/taskcluster/ci/spidermonkey/kind.yml b/taskcluster/ci/spidermonkey/kind.yml
new file mode 100644
index 000000000..9e8f9a682
--- /dev/null
+++ b/taskcluster/ci/spidermonkey/kind.yml
@@ -0,0 +1,199 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+job-defaults:
+ treeherder:
+ platform: linux64/opt
+ kind: build
+ tier: 1
+ index:
+ product: firefox
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ max-run-time: 36000
+ docker-image: {in-tree: desktop-build}
+ run:
+ using: spidermonkey
+ when:
+ files-changed:
+ # any when.files-changed specified below in a job will be
+ # appended to this list
+ - js/public/**
+ - js/src/**
+
+jobs:
+ sm-package/opt:
+ description: "Spidermonkey source package and test"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-package-opt
+ treeherder:
+ symbol: SM-tc(pkg)
+ run:
+ using: spidermonkey-package
+ spidermonkey-variant: plain
+ when:
+ files-changed:
+ - build/**
+ - config/**
+ - configure.py
+ - dom/bindings/**
+ - intl/icu/**
+ - js/moz.configure
+ - layout/tools/reftest/reftest/**
+ - Makefile.in
+ - media/webrtc/trunk/tools/gyp/**
+ - memory/**
+ - mfbt/**
+ - modules/fdlibm/**
+ - modules/zlib/src/**
+ - mozglue/**
+ - moz.build
+ - moz.configure
+ - nsprpub/**
+ - python/**
+ - taskcluster/moz.build
+ - testing/mozbase/**
+ - test.mozbuild
+ - toolkit/mozapps/installer/package-name.mk
+ - toolkit/mozapps/installer/upload-files.mk
+
+ sm-mozjs-sys/debug:
+ description: "Build js/src as the mozjs_sys Rust crate"
+ index:
+ job-name:
+ gecko-v2: sm-mozjs-sys-debug
+ treeherder:
+ symbol: SM-tc(mozjs-crate)
+ run:
+ using: spidermonkey-mozjs-crate
+ spidermonkey-variant: plain
+ run-on-projects:
+ - integration
+ - release
+ - try
+
+ sm-plain/debug:
+ description: "Spidermonkey Plain"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-plaindebug-debug
+ treeherder:
+ platform: linux64/debug
+ symbol: SM-tc(p)
+ run:
+ spidermonkey-variant: plaindebug
+
+ sm-plain/opt:
+ description: "Spidermonkey Plain"
+ index:
+ job-name: sm-plain-opt
+ treeherder:
+ symbol: SM-tc(p)
+ run:
+ spidermonkey-variant: plain
+
+ sm-arm-sim/debug:
+ description: "Spidermonkey ARM sim"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-arm-sim-debug
+ treeherder:
+ symbol: SM-tc(arm)
+ run:
+ spidermonkey-variant: arm-sim
+
+ sm-arm64-sim/debug:
+ description: "Spidermonkey ARM64 sim"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-arm64-sim-debug
+ treeherder:
+ symbol: SM-tc(arm64)
+ run:
+ spidermonkey-variant: arm64-sim
+
+ sm-asan/opt:
+ description: "Spidermonkey Address Sanitizer"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-asan-opt
+ treeherder:
+ symbol: SM-tc(asan)
+ run:
+ spidermonkey-variant: asan
+ tooltool-manifest: browser/config/tooltool-manifests/linux64/asan.manifest
+
+ sm-compacting/debug:
+ description: "Spidermonkey Compacting"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-compacting-debug
+ treeherder:
+ symbol: SM-tc(cgc)
+ run:
+ spidermonkey-variant: compacting
+
+ sm-msan/opt:
+ description: "Spidermonkey Memory Sanitizer"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-msan-opt
+ treeherder:
+ symbol: SM-tc(msan)
+ run:
+ spidermonkey-variant: msan
+ tooltool-manifest: browser/config/tooltool-manifests/linux64/msan.manifest
+
+ sm-tsan/opt:
+ description: "Spidermonkey Thread Sanitizer"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-tsan-opt
+ treeherder:
+ symbol: SM-tc(tsan)
+ tier: 3
+ run-on-projects: []
+ run:
+ spidermonkey-variant: tsan
+ tooltool-manifest: browser/config/tooltool-manifests/linux64/tsan.manifest
+
+ sm-rootanalysis/debug:
+ description: "Spidermonkey Root Analysis"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-rootanalysis-debug
+ treeherder:
+ symbol: SM-tc(r)
+ run:
+ spidermonkey-variant: rootanalysis
+
+ sm-nonunified/debug:
+ description: "Spidermonkey Non-Unified Debug"
+ index:
+ job-name:
+ buildbot: sm-plain
+ gecko-v2: sm-nonunified-debug
+ treeherder:
+ platform: linux64/debug
+ symbol: SM-tc(nu)
+ run:
+ spidermonkey-variant: nonunified
diff --git a/taskcluster/ci/static-analysis/kind.yml b/taskcluster/ci/static-analysis/kind.yml
new file mode 100644
index 000000000..ddd312e70
--- /dev/null
+++ b/taskcluster/ci/static-analysis/kind.yml
@@ -0,0 +1,63 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+job-defaults:
+ index:
+ product: firefox
+ treeherder:
+ symbol: S
+ kind: build
+ tier: 1
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 36000
+
+jobs:
+ macosx64-st-an/opt:
+ description: "MacOS X x64 Cross-compile Static Analysis"
+ index:
+ job-name: macosx64-st-an-opt
+ treeherder:
+ platform: osx-10-7/opt
+ worker-type: aws-provisioner-v1/gecko-{level}-b-macosx64
+ run:
+ using: mozharness
+ actions: [get-secrets build generate-build-stats update]
+ config:
+ - builds/releng_base_mac_64_cross_builds.py
+ - balrog/production.py
+ # Note that, despite the name "cross-opt", this config variant
+ # enables static analysis.
+ custom-build-variant-cfg: cross-opt
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: internal
+ keep-artifacts: false
+
+ linux64-st-an/opt:
+ description: "Linux64 Opt Static Analysis"
+ index:
+ job-name: linux64-st-an-opt
+ treeherder:
+ platform: linux64/opt
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ run:
+ using: mozharness
+ actions: [get-secrets build generate-build-stats]
+ config:
+ - builds/releng_sub_linux_configs/64_stat_and_opt.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+ keep-artifacts: false
diff --git a/taskcluster/ci/toolchain/kind.yml b/taskcluster/ci/toolchain/kind.yml
new file mode 100644
index 000000000..acc67a57f
--- /dev/null
+++ b/taskcluster/ci/toolchain/kind.yml
@@ -0,0 +1,14 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+jobs-from:
+ - linux.yml
+ - windows.yml
diff --git a/taskcluster/ci/toolchain/linux.yml b/taskcluster/ci/toolchain/linux.yml
new file mode 100644
index 000000000..9eeadf3ef
--- /dev/null
+++ b/taskcluster/ci/toolchain/linux.yml
@@ -0,0 +1,66 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+linux64-clang/opt:
+ description: "Clang toolchain build"
+ treeherder:
+ kind: build
+ platform: linux64/opt
+ symbol: Cc(Clang)
+ tier: 1
+ run:
+ using: toolchain-script
+ script: build-clang-linux.sh
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 36000
+ when:
+ files-changed:
+ - 'build/build-clang/**'
+ - 'taskcluster/scripts/misc/build-clang-linux.sh'
+ - 'taskcluster/taskgraph/transforms/job/toolchain.py'
+
+linux64-gcc/opt:
+ description: "GCC toolchain build"
+ treeherder:
+ kind: build
+ platform: linux64/opt
+ symbol: Cc(GCC)
+ tier: 1
+ run:
+ using: toolchain-script
+ script: build-gcc-linux.sh
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 36000
+ when:
+ files-changed:
+ - 'build/unix/build-gcc/**'
+ - 'taskcluster/scripts/misc/build-gcc-linux.sh'
+ - 'taskcluster/taskgraph/transforms/job/toolchain.py'
+
+linux64-binutils/opt:
+ description: "Binutils toolchain build"
+ treeherder:
+ kind: build
+ platform: linux64/opt
+ symbol: Cc(binutils)
+ tier: 1
+ run:
+ using: toolchain-script
+ script: build-binutils-linux.sh
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 36000
+ when:
+ files-changed:
+ - 'build/unix/build-binutils/**'
+ - 'taskcluster/scripts/misc/build-binutils-linux.sh'
+ - 'taskcluster/taskgraph/transforms/job/toolchain.py'
diff --git a/taskcluster/ci/toolchain/windows.yml b/taskcluster/ci/toolchain/windows.yml
new file mode 100644
index 000000000..b5b950616
--- /dev/null
+++ b/taskcluster/ci/toolchain/windows.yml
@@ -0,0 +1,23 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+win32-clang-cl/opt:
+ description: "Clang-cl toolchain build"
+ treeherder:
+ kind: build
+ platform: windows-2012-32/opt
+ symbol: Cc(ClangCL)
+ tier: 2
+ worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+ worker:
+ implementation: generic-worker
+ max-run-time: 36000
+ run:
+ using: toolchain-script
+ script: build-clang-windows.sh
+ when:
+ files-changed:
+ - 'build/build-clang/**'
+ - 'taskcluster/scripts/misc/build-clang-windows.sh'
+ - 'taskcluster/taskgraph/transforms/job/toolchain.py'
diff --git a/taskcluster/ci/upload-symbols/job-template.yml b/taskcluster/ci/upload-symbols/job-template.yml
new file mode 100644
index 000000000..43d6736a0
--- /dev/null
+++ b/taskcluster/ci/upload-symbols/job-template.yml
@@ -0,0 +1,19 @@
+label: # see transforms
+description: Upload Symbols
+dependencies: # see transforms
+expires-after: 7 days
+deadline-after: 24 hours
+run-on-projects:
+ - try
+worker-type: aws-provisioner-v1/symbol-upload
+worker:
+ implementation: docker-worker
+ max-run-time: 600
+ command: ["/bin/bash", "bin/upload.sh"]
+ docker-image: taskclusterprivate/upload_symbols:0.0.4
+ env:
+ GECKO_HEAD_REPOSITORY: # see transforms
+ GECKO_HEAD_REV: # see transforms
+ ARTIFACT_TASKID: {"task-reference": "<build>"}
+scopes:
+ - docker-worker:image:taskclusterprivate/upload_symbols:0.0.4
diff --git a/taskcluster/ci/upload-symbols/kind.yml b/taskcluster/ci/upload-symbols/kind.yml
new file mode 100644
index 000000000..b3e2ba5d8
--- /dev/null
+++ b/taskcluster/ci/upload-symbols/kind.yml
@@ -0,0 +1,19 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.post_build:PostBuildTask
+
+transforms:
+ - taskgraph.transforms.upload_symbols:transforms
+ - taskgraph.transforms.task:transforms
+
+kind-dependencies:
+ - build
+
+job-template: job-template.yml
+
+only-for-build-platforms:
+ - linux64/opt
+ - linux64/debug
+ - android-api-15/opt
diff --git a/taskcluster/ci/valgrind/kind.yml b/taskcluster/ci/valgrind/kind.yml
new file mode 100644
index 000000000..77e37e89b
--- /dev/null
+++ b/taskcluster/ci/valgrind/kind.yml
@@ -0,0 +1,40 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+implementation: taskgraph.task.transform:TransformTask
+
+transforms:
+ - taskgraph.transforms.build_attrs:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+jobs:
+ linux64-valgrind/opt:
+ description: "Linux64 Valgrind Opt"
+ index:
+ product: firefox
+ job-name: linux64-valgrind-opt
+ treeherder:
+ platform: linux64/opt
+ symbol: tc(V)
+ kind: build
+ tier: 1
+ worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+ worker:
+ implementation: docker-worker
+ docker-image: {in-tree: desktop-build}
+ max-run-time: 72000
+ run:
+ using: mozharness
+ actions: [get-secrets build valgrind-test generate-build-stats]
+ custom-build-variant-cfg: valgrind
+ config:
+ - builds/releng_base_linux_64_builds.py
+ - balrog/production.py
+ script: "mozharness/scripts/fx_desktop_build.py"
+ secrets: true
+ tooltool-downloads: public
+ need-xvfb: true
+
+
diff --git a/taskcluster/docs/attributes.rst b/taskcluster/docs/attributes.rst
new file mode 100644
index 000000000..d93964d65
--- /dev/null
+++ b/taskcluster/docs/attributes.rst
@@ -0,0 +1,124 @@
+===============
+Task Attributes
+===============
+
+Tasks can be filtered, for example to support "try" pushes which only perform a
+subset of the task graph or to link dependent tasks. This filtering is the
+difference between a full task graph and a target task graph.
+
+Filtering takes place on the basis of attributes. Each task has a dictionary
+of attributes and filters over those attributes can be expressed in Python. A
+task may not have a value for every attribute.
+
+The attributes, and acceptable values, are defined here. In general, attribute
+names and values are the short, lower-case form, with underscores.
+
+kind
+====
+
+A task's ``kind`` attribute gives the name of the kind that generated it, e.g.,
+``build`` or ``spidermonkey``.
+
+run_on_projects
+===============
+
+The projects where this task should be in the target task set. This is how
+requirements like "only run this on inbound" get implemented. These are
+either project names or the aliases
+
+ * `integration` -- integration branches
+ * `release` -- release branches including mozilla-central
+ * `all` -- everywhere (the default)
+
+For try, this attribute applies only if ``-p all`` is specified. All jobs can
+be specified by name regardless of ``run_on_projects``.
+
+If ``run_on_projects`` is set to an empty list, then the task will not run
+anywhere, unless its build platform is specified explicitly in try syntax.
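+
+For illustration, a job description restricted to integration and release
+branches might contain the following (the job name is hypothetical; note the
+key is spelled ``run-on-projects`` in job descriptions):
+
+.. code-block:: yaml
+
+   my-lint/opt:            # hypothetical job name
+       run-on-projects:
+           - integration
+           - release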
+
+task_duplicates
+===============
+
+This is used to indicate that we want multiple copies of the task created.
+This feature is used to track down intermittent job failures.
+
+If this value is set to N, the task-creation machinery will create a total of N
+copies of the task. Only the first copy will be included in the taskgraph
+output artifacts, although all tasks will be contained in the same taskGroup.
+
+While most attributes are considered read-only, target task methods may alter
+this attribute of tasks they include in the target set.
+
+build_platform
+==============
+
+The build platform defines the platform for which the binary was built. It is
+set for both build and test jobs, although test jobs may have a different
+``test_platform``.
+
+build_type
+==========
+
+The type of build being performed. This is a subdivision of ``build_platform``,
+used for different kinds of builds that target the same platform. Values are
+
+ * ``debug``
+ * ``opt``
+
+test_platform
+=============
+
+The test platform defines the platform on which tests are run. It is only
+defined for test jobs and may differ from ``build_platform`` when the same binary
+is tested on several platforms (for example, on several versions of Windows).
+This applies for both talos and unit tests.
+
+Unlike ``build_platform``, the test platform is represented in a slash-separated
+format, e.g., ``linux64/opt``.
+
+unittest_suite
+==============
+
+This is the unit test suite being run in a unit test task. For example,
+``mochitest`` or ``cppunittest``.
+
+unittest_flavor
+===============
+
+If a unittest suite has subdivisions, those are represented as flavors. Not
+all suites have flavors, in which case this attribute should be set to match
+the suite. Examples: ``mochitest-devtools-chrome-chunked`` or ``a11y``.
+
+unittest_try_name
+=================
+
+This is the name used to refer to a unit test via try syntax. It
+may not match either of ``unittest_suite`` or ``unittest_flavor``.
+
+talos_try_name
+==============
+
+This is the name used to refer to a talos job via try syntax.
+
+test_chunk
+==========
+
+This is the chunk number of a chunked test suite (talos or unittest). Note
+that this is a string!
+
+e10s
+====
+
+For test suites which distinguish whether they run with or without e10s, this
+boolean value identifies this particular run.
+
+image_name
+==========
+
+For the ``docker_image`` kind, this attribute contains the docker image name.
+
+nightly
+=======
+
+Signals whether the task is part of a nightly graph. Useful when filtering
+out nightly tasks from the full task set at the target stage.
diff --git a/taskcluster/docs/caches.rst b/taskcluster/docs/caches.rst
new file mode 100644
index 000000000..9f19035d7
--- /dev/null
+++ b/taskcluster/docs/caches.rst
@@ -0,0 +1,43 @@
+.. taskcluster_caches:
+
+=============
+Common Caches
+=============
+
+There are various caches used by the in-tree tasks. This page attempts to
+document them and their appropriate use.
+
+Version Control Caches
+======================
+
+``level-{{level}}-checkouts-{{version}}``
+ This cache holds version control checkouts, each in a subdirectory named
+ after the repo (e.g., ``gecko``).
+
+ Checkouts should be read-only. If a task needs to create new files from
+ content of a checkout, this content should be written in a separate
+ directory/cache (like a workspace).
+
+ A ``version`` parameter appears in the cache name to allow
+ backwards-incompatible changes to the cache's behavior.
+
+``level-{{level}}-{{project}}-tc-vcs`` (deprecated)
+ This cache is used internally by ``tc-vcs``. This tool is deprecated and
+ should be replaced with ``hg robustcheckout``.
+
+Workspace Caches
+================
+
+``level-{{level}}-*-workspace``
+ These caches (of various names typically ending with ``workspace``)
+ contain state to be shared between task invocations. Use cases are
+ dependent on the task.
+
+Other
+=====
+
+``tooltool-cache``
+ Tooltool invocations should use this cache. Tooltool will store files here
+ indexed by their hash, and will verify hashes before copying files from
+ this directory, so there is no concern with sharing the cache between jobs
+ of different levels.
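+
+In task definitions these caches appear as docker-worker cache mounts. A
+minimal sketch, assuming the docker-worker payload schema (the cache name
+shown is an illustrative instance of the pattern above):
+
+.. code-block:: yaml
+
+   worker:
+       caches:
+           - type: persistent
+             name: level-3-checkouts-v1
+             mount-point: /home/worker/checkouts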
diff --git a/taskcluster/docs/docker-images.rst b/taskcluster/docs/docker-images.rst
new file mode 100644
index 000000000..22dea4dea
--- /dev/null
+++ b/taskcluster/docs/docker-images.rst
@@ -0,0 +1,42 @@
+.. taskcluster_dockerimages:
+
+=============
+Docker Images
+=============
+
+TaskCluster Docker images are defined in the source directory under
+``testing/docker``. Each subdirectory therein is named after an
+image used as part of the task graph.
+
+Adding Extra Files to Images
+============================
+
+Dockerfile syntax has been extended to allow *any* file from the
+source checkout to be added to the image build *context*. (Traditionally
+you can only ``ADD`` files from the same directory as the Dockerfile.)
+
+Simply add the following syntax as a comment in a Dockerfile::
+
+ # %include <path>
+
+e.g.::
+
+ # %include mach
+ # %include testing/mozharness
+
+The argument to ``# %include`` is a relative path from the root level of
+the source directory. It can be a file or a directory. If a file, only that
+file will be added. If a directory, every file under that directory will be
+added (even files that are untracked or ignored by version control).
+
+Files added using ``# %include`` syntax are available inside the build
+context under the ``topsrcdir/`` path.
+
+Files are added as they exist on disk; e.g., executable flags should be
+preserved. However, the file owner/group is changed to ``root`` and the
+``mtime`` of the file is normalized.
+
+Here is an example Dockerfile snippet::
+
+ # %include mach
+ ADD topsrcdir/mach /home/worker/mach
diff --git a/taskcluster/docs/how-tos.rst b/taskcluster/docs/how-tos.rst
new file mode 100644
index 000000000..6b143dd42
--- /dev/null
+++ b/taskcluster/docs/how-tos.rst
@@ -0,0 +1,220 @@
+How Tos
+=======
+
+All of this equipment is here to help you get your work done more efficiently.
+However, learning how task-graphs are generated is probably not the work you
+are interested in doing. This section should help you accomplish some of the
+more common changes to the task graph with minimal fuss.
+
+.. important::
+
+ If you cannot accomplish what you need with the information provided here,
+ please consider whether you can achieve your goal in a different way.
+ Perhaps something simpler would cost a bit more in compute time, but save
+ the much more expensive resource of developers' mental bandwidth.
+ Task-graph generation is already complex enough!
+
+ If you want to proceed, you may need to delve into the implementation of
+ task-graph generation. The documentation and code are designed to help, as
+ are the authors - ``hg blame`` may help track down helpful people.
+
+ As you write your new transform or add a new kind, please consider the next
+ developer. Where possible, make your change data-driven and general, so
+ that others can make a much smaller change. Document the semantics of what
+ you are changing clearly, especially if it involves modifying a transform
+ schema. And if you are adding complexity temporarily while making a
+ gradual transition, please open a new bug to remind yourself to remove the
+ complexity when the transition is complete.
+
+Hacking Task Graphs
+-------------------
+
+The recommended process for changing task graphs is this:
+
+1. Find a recent decision task on the project or branch you are working on,
+ and download its ``parameters.yml`` from the Task Inspector. This file
+ contains all of the inputs to the task-graph generation process. Its
+ contents are simple enough if you would like to modify it, and it is
+ documented in :doc:`parameters`.
+
+2. Run one of the ``mach taskgraph`` subcommands (see :doc:`taskgraph`) to
+ generate a baseline against which to measure your changes. For example:
+
+ .. code-block:: none
+
+ ./mach taskgraph tasks --json -p parameters.yml > old-tasks.json
+
+3. Make your modifications under ``taskcluster/``.
+
+4. Run the same ``mach taskgraph`` command, sending the output to a new file,
+ and use ``diff`` to compare the old and new files. Make sure your changes
+ have the desired effect and no undesirable side-effects.
+
+5. When you are satisfied with the changes, push them to try to ensure that the
+ modified tasks work as expected.
+
+Common Changes
+--------------
+
+Changing Test Characteristics
+.............................
+
+First, find the test description. This will be in
+``taskcluster/ci/*/tests.yml``, for the appropriate kind (consult
+:doc:`kinds`). You will find a YAML stanza for each test suite, and each
+stanza defines the test's characteristics. For example, the ``chunks``
+property gives the number of chunks to run. This can be specified as a simple
+integer if all platforms have the same chunk count, or it can be keyed by test
+platform. For example:
+
+.. code-block:: yaml
+
+ chunks:
+ by-test-platform:
+ linux64/debug: 10
+ default: 8
+
+The full set of available properties is in
+``taskcluster/taskgraph/transforms/tests/test_description.py``. Some other
+commonly-modified properties are ``max-run-time`` (useful if tests are being
+killed for exceeding maxRunTime) and ``treeherder-symbol``.
+
+.. note::
+
+ Android tests are also chunked at the mozharness level, so you will need to
+ modify the relevant mozharness config, as well.
+
+Adding a Test Suite
+...................
+
+To add a new test suite, you will need to know the proper mozharness invocation
+for that suite, and which kind it fits into (consult :doc:`kinds`).
+
+Add a new stanza to ``taskcluster/ci/<kind>/tests.yml``, copying from the other
+stanzas in that file. The meanings should be clear, but authoritative
+documentation is in
+``taskcluster/taskgraph/transforms/tests/test_description.py`` should you need
+it. The stanza name is the name by which the test will be referenced in try
+syntax.
+
+Add your new test to a test set in ``test-sets.yml`` in the same directory. If
+the test should only run on a limited set of platforms, you may need to define
+a new test set and reference that from the appropriate platforms in
+``test-platforms.yml``. If you do so, include some helpful comments in
+``test-sets.yml`` for the next person.
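+
+As a compact sketch (the suite name, symbol, and test-set name here are
+invented for illustration; consult the schema file above for the
+authoritative keys):
+
+.. code-block:: yaml
+
+   # tests.yml
+   my-new-suite:
+       description: "My new test suite"
+       suite: my-new-suite
+       treeherder-symbol: tc(my)
+       chunks: 1
+
+   # test-sets.yml
+   experimental-tests:
+       - my-new-suite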
+
+Greening Up a New Test
+......................
+
+When a test is not yet reliably green, configuration for that test should not
+be landed on integration branches. Of course, you can control where the
+configuration is landed! For many cases, it is easiest to green up a test in
+try: push the configuration to run the test to try along with your work to fix
+the remaining test failures.
+
+When working with a group, check out a "twig" repository to share among your
+group, and land the test configuration in that repository. Once the test is
+green, merge to an integration branch and the test will begin running there as
+well.
+
+Adding a New Task
+.................
+
+If you are adding a new task that is not a test suite, there are a number of
+options. A few questions to consider:
+
+ * Is this a new build platform or variant that will produce an artifact to
+ be run through the usual test suites?
+
+ * Does this task depend on other tasks? Do other tasks depend on it?
+
+ * Is this one of a few related tasks, or will you need to generate a large
+ set of tasks using some programmatic means (for example, chunking)?
+
+ * How is the task actually executed? Mozharness? Mach?
+
+ * What kind of environment does the task require?
+
+Armed with that information, you can choose among a few options for
+implementing this new task. Try to choose the simplest solution that will
+satisfy your near-term needs. Since this is all implemented in-tree, it
+is not difficult to refactor later when you need more generality.
+
+Existing Kind
+`````````````
+
+The simplest option is to add your task to an existing kind. This is most
+practical when the task "makes sense" as part of that kind -- for example, if
+your task is building an installer for a new platform using mozharness scripts
+similar to the existing build tasks, it makes most sense to add your task to
+the ``build`` kind. If you need some additional functionality in the kind,
+it's OK to modify the implementation as necessary, as long as the modification
+is complete and useful to the next developer to come along.
+
+New Kind
+````````
+
+The next option to consider is adding a new kind. A distinct kind gives you
+some isolation from other task types, which can be nice if you are adding an
+experimental kind of task.
+
+Kinds can range in complexity. The simplest sort of kind uses the
+``TransformTask`` implementation to read a list of jobs from the ``jobs`` key,
+and applies the standard ``job`` and ``task`` transforms:
+
+.. code-block:: yaml
+
+ implementation: taskgraph.task.transform:TransformTask
+ transforms:
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+ jobs:
+ - ..your job description here..
+
+Custom Kind Implementation
+``````````````````````````
+
+If your task depends on other tasks, then the decision of which tasks to create
+may require some code. For example, the ``upload-symbols`` kind iterates over
+the builds in the graph, generating a task for each one. This specific
+post-build behavior is implemented in the general
+``taskgraph.task.post_build:PostBuildTask`` kind implementation. If your task
+needs something more purpose-specific, then it may be time to write a new kind
+implementation.
+
+Custom Transforms
+`````````````````
+
+If your task needs to create many tasks from a single description, for example
+to implement chunking, it is time to implement some custom transforms. Ideally
+those transforms will produce job descriptions, so you can use the existing ``job``
+and ``task`` transforms:
+
+.. code-block:: yaml
+
+ transforms:
+ - taskgraph.transforms.my_stuff:transforms
+ - taskgraph.transforms.job:transforms
+ - taskgraph.transforms.task:transforms
+
+Similarly, if you need to include dynamic task defaults -- perhaps some feature
+is only available in level-3 repositories, or on specific projects -- then
+custom transforms are the appropriate tool. Try to keep transforms simple,
+single-purpose and well-documented!
+
+Custom Run-Using
+````````````````
+
+If the way your task is executed is unique (so, not a mach command or
+mozharness invocation), you can add a new implementation of the job
+description's "run" section. Before you do this, consider that it might be a
+better investment to modify your task to support invocation via mozharness or
+mach, instead. If this is not possible, then adding a new file in
+``taskcluster/taskgraph/transforms/job`` with a structure similar to its peers
+will make the new run-using option available for job descriptions.
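+
+Once the new file is in place, a job description selects it via ``run.using``,
+just as with ``mozharness`` or ``mach`` (the names below are hypothetical):
+
+.. code-block:: yaml
+
+   run:
+       using: my-runner        # hypothetical run-using implementation
+       some-option: value      # options defined by that implementation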
+
+Something Else?
+...............
+
+If you make another change not described here that turns out to be simple or
+common, please include an update to this file in your patch.
diff --git a/taskcluster/docs/index.rst b/taskcluster/docs/index.rst
new file mode 100644
index 000000000..d1a5c600b
--- /dev/null
+++ b/taskcluster/docs/index.rst
@@ -0,0 +1,30 @@
+.. taskcluster_index:
+
+TaskCluster Task-Graph Generation
+=================================
+
+The ``taskcluster`` directory contains support for defining the graph of tasks
+that must be executed to build and test the Gecko tree. This is more complex
+than you might suppose! This implementation supports:
+
+ * A huge array of tasks
+ * Different behavior for different repositories
+ * "Try" pushes, with special means to select a subset of the graph for execution
+ * Optimization -- skipping tasks that have already been performed
+ * Extremely flexible generation of a variety of tasks using an approach of
+ incrementally transforming job descriptions into task definitions.
+
+This section of the documentation describes the process in some detail,
+referring to the source where necessary. If you are reading this with a
+particular goal in mind and would rather avoid becoming a task-graph expert,
+check out the :doc:`how-to section <how-tos>`.
+
+.. toctree::
+
+ taskgraph
+ loading
+ transforms
+ yaml-templates
+ docker-images
+ how-tos
+ reference
diff --git a/taskcluster/docs/kinds.rst b/taskcluster/docs/kinds.rst
new file mode 100644
index 000000000..44bddb360
--- /dev/null
+++ b/taskcluster/docs/kinds.rst
@@ -0,0 +1,144 @@
+Task Kinds
+==========
+
+This section lists and documents the available task kinds.
+
+build
+------
+
+Builds are tasks that produce an installer or other output that can be run by
+users or automated tests. This is more restrictive than most definitions of
+"build" in a Mozilla context: it does not include tasks that run build-like
+actions for static analysis or to produce instrumented artifacts.
+
+artifact-build
+--------------
+
+This kind performs an artifact build: one based on precompiled binaries
+discovered via the TaskCluster index. This task verifies that such builds
+continue to work correctly.
+
+hazard
+------
+
+Hazard builds are similar to "regular" builds, but use a compiler extension to
+extract a bunch of data from the build and then analyze that data looking for
+hazardous behaviors.
+
+l10n
+----
+
+TBD (Callek)
+
+source-check
+------------
+
+Source-checks are tasks that look at the Gecko source directly to check
+correctness. This can include linting, Python unit tests, source-code
+analysis, or measurement work -- basically anything that does not require a
+build.
+
+upload-symbols
+--------------
+
+Upload-symbols tasks run after builds and upload the symbols files generated by
+build tasks to Socorro for later use in crash analysis.
+
+valgrind
+--------
+
+Valgrind tasks produce builds instrumented by valgrind.
+
+static-analysis
+---------------
+
+Static analysis builds use the compiler to perform some detailed analysis of
+the source code while building. The useful output from these tasks are their
+build logs, and while they produce a binary, they do not upload it as an
+artifact.
+
+toolchain
+---------
+
+Toolchain builds create the compiler toolchains used to build Firefox. These
+will eventually be dependencies of the builds themselves, but for the moment
+are run manually via try pushes and the results uploaded to tooltool.
+
+spidermonkey
+------------
+
+Spidermonkey tasks check out the full gecko source tree, then compile only the
+spidermonkey portion. Each task runs specific tests after the build.
+
+marionette-harness
+------------------
+
+TBD (Maja)
+
+Tests
+-----
+
+Test tasks for Gecko products are divided into several kinds, but share a
+common implementation. The process goes like this, based on a set of YAML
+files named in ``kind.yml``:
+
+ * For each build task, determine the related test platforms based on the build
+ platform. For example, a Windows build might be tested on both Windows 7
+ and Windows 10. Each test platform specifies a "test set" indicating which
+ tests to run. This is configured in the file named
+ ``test-platforms.yml``.
+
+ * Each test set is expanded to a list of tests to run. This is configured in
+ the file named ``test-sets.yml``.
+
+ * Each named test is looked up in the file named ``tests.yml`` to find a
+ test description. This test description indicates what the test does, how
+ it is reported to treeherder, and how to perform the test, all in a
+ platform-independent fashion.
+
+ * Each test description is converted into one or more tasks. This is
+ performed by a sequence of transforms defined in the ``transforms`` key in
+ ``kind.yml``. See :doc:`transforms` for more information on these
+ transforms.
+
+ * The resulting tasks become a part of the task graph.
+
+.. important::
+
+ This process generates *all* test jobs, regardless of tree or try syntax.
+ It is up to a later stage of the task-graph generation (the target set) to
+ select the tests that will actually be performed.
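+
+To make the flow above concrete, here is a compact, hypothetical sketch of
+the three files (key names follow the descriptions above; values are
+illustrative):
+
+.. code-block:: yaml
+
+   # test-platforms.yml: build platform -> test platform and test set
+   linux64/opt:
+       build-platform: linux64/opt
+       test-set: all-tests
+
+   # test-sets.yml: test set -> list of test names
+   all-tests:
+       - mochitest
+       - cppunittest
+
+   # tests.yml: per-test description
+   mochitest:
+       description: "Mochitest run"
+       suite: mochitest
+       treeherder-symbol: tc-M()
+       chunks: 5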
+
+desktop-test
+............
+
+The ``desktop-test`` kind defines tests for Desktop builds. Its ``tests.yml``
+defines the full suite of desktop tests and their particulars, leaving it to
+the transforms to determine how those particulars apply to Linux, OS X, and
+Windows.
+
+android-test
+............
+
+The ``android-test`` kind defines tests for Android builds.
+
+It is very similar to ``desktop-test``, but the details of running the tests
+differ substantially, so they are defined separately.
+
+docker-image
+------------
+
+Tasks of the ``docker-image`` kind build the Docker images in which other
+Docker tasks run.
+
+The tasks to generate each docker image have predictable labels:
+``build-docker-image-<name>``.
+
+Docker images are built from subdirectories of ``testing/docker``, using
+``docker build``. There is currently no capability for one Docker image to
+depend on another in-tree docker image, without uploading the latter to a
+Docker repository.
+
+The task definition used to create the image-building tasks is given in
+``image.yml`` in the kind directory, and is interpreted as a :doc:`YAML
+Template <yaml-templates>`.
diff --git a/taskcluster/docs/loading.rst b/taskcluster/docs/loading.rst
new file mode 100644
index 000000000..1fa3c50f1
--- /dev/null
+++ b/taskcluster/docs/loading.rst
@@ -0,0 +1,31 @@
+Loading Tasks
+=============
+
+The full task graph generation involves creating tasks for each kind. Kinds
+are ordered to satisfy ``kind-dependencies``, and then the ``implementation``
+specified in ``kind.yml`` is used to load the tasks for that kind.
+
+Specifically, the class's ``load_tasks`` class method is called, and returns a
+list of new ``Task`` instances.
+
+TransformTask
+-------------
+
+Most kinds generate their tasks by starting with a set of items describing the
+jobs that should be performed and transforming them into task definitions.
+This is the familiar ``transforms`` key in ``kind.yml`` and is further
+documented in :doc:`transforms`.
+
+Such kinds generally specify their tasks in a common format: either based on a
+``jobs`` property in ``kind.yml``, or on YAML files listed in ``jobs-from``.
+This is handled by the ``TransformTask`` class in
+``taskcluster/taskgraph/task/transform.py``.
+
+For kinds producing tasks that depend on other tasks -- for example, signing
+tasks depend on build tasks -- ``TransformTask`` has a ``get_inputs`` method
+that can be overridden in subclasses and written to return a set of items based
+on tasks that already exist. You can see a nice example of this behavior in
+``taskcluster/taskgraph/task/post_build.py``.
+
+For more information on how all of this works, consult the docstrings and
+comments in the source code itself.
diff --git a/taskcluster/docs/parameters.rst b/taskcluster/docs/parameters.rst
new file mode 100644
index 000000000..8514259ce
--- /dev/null
+++ b/taskcluster/docs/parameters.rst
@@ -0,0 +1,97 @@
+==========
+Parameters
+==========
+
+Task-graph generation takes a collection of parameters as input, in the form of
+a JSON or YAML file.
+
+During decision-task processing, some of these parameters are supplied on the
+command line or by environment variables. The decision task helpfully produces
+a full parameters file as one of its output artifacts. The other ``mach
+taskgraph`` commands can take this file as input. This can be very helpful
+when working on a change to the task graph.
+
+When experimenting with local runs of the task-graph generation, it is always
+best to find a recent decision task's ``parameters.yml`` file, and modify that
+file if necessary, rather than starting from scratch. This ensures you have a
+complete set of parameters.
+
+The properties of the parameters object are described here, divided roughly by
+topic.
+
+Push Information
+----------------
+
+``triggered_by``
+ The event that precipitated this decision task; one of ``"nightly"`` or
+ ``"push"``.
+
+``base_repository``
+ The repository from which to do an initial clone, utilizing any available
+ caching.
+
+``head_repository``
+ The repository containing the changeset to be built. This may differ from
+ ``base_repository`` in cases where ``base_repository`` is likely to be cached
+ and only a few additional commits are needed from ``head_repository``.
+
+``head_rev``
+ The revision to check out; this can be a short revision string.
+
+``head_ref``
+ For Mercurial repositories, this is the same as ``head_rev``. For
+ git repositories, which do not allow pulling explicit revisions, this gives
+ the symbolic ref containing ``head_rev`` that should be pulled from
+ ``head_repository``.
+
+``owner``
+ Email address indicating the person who made the push. Note that this
+ value may be forged and *must not* be relied on for authentication.
+
+``message``
+ The commit message
+
+``pushlog_id``
+ The ID from the ``hg.mozilla.org`` pushlog
+
+``pushdate``
+ The timestamp of the push to the repository that triggered this decision
+ task. Expressed as an integer seconds since the UNIX epoch.
+
+``build_date``
+ The timestamp of the build date. Defaults to ``pushdate``, falling back to
+ the time of taskgraph invocation. Expressed as integer seconds since the
+ UNIX epoch.
+
+``moz_build_date``
+ A formatted timestamp of ``build_date``. Expressed as a string in the
+ format ``%Y%m%d%H%M%S``.
+
+Tree Information
+----------------
+
+``project``
+ Another name for what may otherwise be called tree or branch or
+ repository. This is the unqualified name, such as ``mozilla-central`` or
+ ``cedar``.
+
+``level``
+ The `SCM level
+ <https://www.mozilla.org/en-US/about/governance/policies/commit/access-policy/>`_
+ associated with this tree. This dictates the names of resources used in the
+ generated tasks, and those tasks will fail if it is incorrect.
+
+Target Set
+----------
+
+The "target set" is the set of task labels which must be included in a task
+graph. The task graph generation process will include any tasks required by
+those in the target set, recursively. In a decision task, this set can be
+specified programmatically using one of a variety of methods (e.g., parsing try
+syntax or reading a project-specific configuration file).
+
+``target_tasks_method``
+ The method to use to determine the target task set. This is the suffix of
+ one of the functions in ``taskcluster/taskgraph/target_tasks.py``.
+
+``optimize_target_tasks``
+ If true, then target tasks are eligible for optimization.
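+
+Putting these together, a trimmed ``parameters.yml`` might look like the
+following (all values are illustrative, not taken from a real push; the
+``target_tasks_method`` shown assumes a corresponding function exists in
+``target_tasks.py``):
+
+.. code-block:: yaml
+
+   triggered_by: push
+   base_repository: https://hg.mozilla.org/mozilla-central
+   head_repository: https://hg.mozilla.org/try
+   head_rev: abcdef123456
+   head_ref: abcdef123456
+   owner: someone@example.com
+   message: "try: -b o -p linux64 -u none -t none"
+   project: try
+   level: 1
+   target_tasks_method: try_option_syntax
+   optimize_target_tasks: false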
diff --git a/taskcluster/docs/reference.rst b/taskcluster/docs/reference.rst
new file mode 100644
index 000000000..813a3f630
--- /dev/null
+++ b/taskcluster/docs/reference.rst
@@ -0,0 +1,12 @@
+Reference
+=========
+
+These sections contain some reference documentation for various aspects of
+taskgraph generation.
+
+.. toctree::
+
+ kinds
+ parameters
+ attributes
+ caches
diff --git a/taskcluster/docs/taskgraph.rst b/taskcluster/docs/taskgraph.rst
new file mode 100644
index 000000000..5d3e7c7d3
--- /dev/null
+++ b/taskcluster/docs/taskgraph.rst
@@ -0,0 +1,276 @@
+======================
+TaskGraph Mach Command
+======================
+
+The task graph is built by linking different kinds of tasks together, pruning
+out tasks that are not required, then optimizing by replacing subgraphs with
+links to already-completed tasks.
+
+Concepts
+--------
+
+* *Task Kind* - Tasks are grouped by kind, where tasks of the same kind do not
+ have interdependencies but have substantial similarities, and may depend on
+ tasks of other kinds. Kinds are the primary means of supporting diversity,
+ in that a developer can add a new kind to do just about anything without
+ impacting other kinds.
+
+* *Task Attributes* - Tasks have string attributes which can be used for
+ filtering. Attributes are documented in :doc:`attributes`.
+
+* *Task Labels* - Each task has a unique identifier within the graph that is
+ stable across runs of the graph generation algorithm. Labels are replaced
+ with TaskCluster TaskIds at the latest time possible, facilitating analysis
+ of graphs without distracting noise from randomly-generated taskIds.
+
+* *Optimization* - replacement of a task in a graph with an equivalent,
+ already-completed task, or a null task, avoiding repetition of work.
+
+Kinds
+-----
+
+Kinds are the focal point of this system. They provide an interface between
+the large-scale graph-generation process and the small-scale task-definition
+needs of different kinds of tasks. Each kind may implement task generation
+differently. Some kinds may generate task definitions entirely internally (for
+example, symbol-upload tasks are all alike, and very simple), while other kinds
+may do little more than parse a directory of YAML files.
+
+A ``kind.yml`` file contains data about the kind, as well as referring to a
+Python class implementing the kind in its ``implementation`` key. That
+implementation may rely on lots of code shared with other kinds, or contain a
+completely unique implementation of some functionality.
+
+The full list of pre-defined keys in this file is:
+
+``implementation``
+ Class implementing this kind, in the form ``<module-path>:<object-path>``.
+ This class should be a subclass of ``taskgraph.task.base:Task``.
+
+``kind-dependencies``
+ Kinds which should be loaded before this one. This is useful when the kind
+ will use the list of already-created tasks to determine which tasks to
+ create, for example adding an upload-symbols task after every build task.
+
+Any other keys are subject to interpretation by the kind implementation.
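+
+For instance, the ``upload-symbols`` kind pairs a custom implementation with
+a dependency on builds, plus a kind-specific ``job-template`` key (trimmed):
+
+.. code-block:: yaml
+
+   implementation: taskgraph.task.post_build:PostBuildTask
+   kind-dependencies:
+       - build
+   job-template: job-template.yml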
+
+The result is a nice segmentation of implementation so that the more esoteric
+in-tree projects can do their crazy stuff in an isolated kind without making
+the bread-and-butter build and test configuration more complicated.
+
+Dependencies
+------------
+
+Dependencies between tasks are represented as labeled edges in the task graph.
+For example, a test task must depend on the build task creating the artifact it
+tests, and this dependency edge is named 'build'. The task graph generation
+process later resolves these dependencies to specific taskIds.
+
+Decision Task
+-------------
+
+The decision task is the first task created when a new graph begins. It is
+responsible for creating the rest of the task graph.
+
+The decision task for pushes is defined in-tree, in ``.taskcluster.yml``. That
+task description invokes ``mach taskcluster decision`` with some metadata about
+the push. That mach command determines the optimized task graph, then calls
+the TaskCluster API to create the tasks.
+
+Note that this mach command is *not* designed to be invoked directly by humans.
+Instead, use the mach commands described below, supplying ``parameters.yml``
+from a recent decision task. These commands allow testing everything the
+decision task does except the command-line processing and the
+``queue.createTask`` calls.
+
+Graph Generation
+----------------
+
+Graph generation, as run via ``mach taskgraph decision``, proceeds as follows:
+
+#. For all kinds, generate all tasks. The result is the "full task set"
+#. Create dependency links between tasks using kind-specific mechanisms. The
+ result is the "full task graph".
+#. Select the target tasks (based on try syntax or a tree-specific
+ specification). The result is the "target task set".
+#. Based on the full task graph, calculate the transitive closure of the target
+ task set. That is, the target tasks and all requirements of those tasks.
+ The result is the "target task graph".
+#. Optimize the target task graph based on kind-specific optimization methods.
+ The result is the "optimized task graph" with fewer nodes than the target
+ task graph.
+#. Create tasks for all tasks in the optimized task graph.
+
+Transitive Closure
+..................
+
+Transitive closure is a fancy name for this sort of operation:
+
+ * start with a set of tasks
+ * add all tasks on which any of those tasks depend
+ * repeat until nothing changes
+
+The effect is this: imagine you start with a linux32 test job and a linux64 test job.
+In the first round, each test task depends on the test docker image task, so add that image task.
+Each test also depends on a build, so add the linux32 and linux64 build tasks.
+
+Then repeat: the test docker image task is already present, as are the build
+tasks, but those build tasks depend on the build docker image task. So add
+that build docker image task. Repeat again: this time, none of the tasks in
+the set depend on a task not in the set, so nothing changes and the process is
+complete.
+
+And as you can see, the graph we've built now includes everything we wanted
+(the test jobs) plus everything required to do that (docker images, builds).
+
+Optimization
+------------
+
+The objective of optimization is to remove as many tasks from the graph as
+possible, as efficiently as possible, thereby delivering useful results as
+quickly as possible. For example, ideally if only a test script is modified in
+a push, then the resulting graph contains only the corresponding test suite
+task.
+
+A task is said to be "optimized" when it is either replaced with an equivalent,
+already-existing task, or dropped from the graph entirely.
+
+A task can be optimized if all of its dependencies can be optimized and none of
+its inputs have changed. For a task on which no other tasks depend (a "leaf
+task"), the optimizer can determine what has changed by looking at the
+version-control history of the push: if the relevant files are not modified in
+the push, then it considers the inputs unchanged. For tasks on which other
+tasks depend ("non-leaf tasks"), the optimizer must replace the task with
+another, equivalent task, so it generates a hash of all of the inputs and uses
+that to search for a matching, existing task.
+
+In some cases, such as try pushes, tasks in the target task set have been
+explicitly requested and are thus excluded from optimization. In other cases,
+the target task set is almost the entire task graph, so targeted tasks are
+considered for optimization. This behavior is controlled with the
+``optimize_target_tasks`` parameter.
+
+Action Tasks
+------------
+
+Action Tasks are tasks which help you to schedule new jobs via Treeherder's
+"Add New Jobs" feature. The Decision Task creates a YAML file named
+``action.yml`` which can be used to schedule Action Tasks after suitably replacing
+``{{decision_task_id}}`` and ``{{task_labels}}``, which correspond to the decision
+task ID of the push and a comma-separated list of task labels that need to be
+scheduled.
+
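+For illustration only, the substitution could be done with something like the
+following sketch; the artifact path and the helper name are assumptions, not
+the exact mechanism used in automation:
+
+.. code-block:: python
+
+    import urllib2
+
+    ACTION_URL = ('https://queue.taskcluster.net/v1/task/{}'
+                  '/artifacts/public/action.yml')
+
+    def render_action_task(decision_task_id, task_labels):
+        """Fetch action.yml from the decision task and fill in the
+        template variables (hypothetical helper)."""
+        template = urllib2.urlopen(ACTION_URL.format(decision_task_id)).read()
+        return (template
+                .replace('{{decision_task_id}}', decision_task_id)
+                .replace('{{task_labels}}', ','.join(task_labels)))
+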
+This task invokes ``mach taskgraph action-task``, which builds up a task graph
+of the requested tasks. This graph is optimized against the tasks that the
+decision task already scheduled in the same push.
+
+So for instance, if you had already requested a build task in the ``try`` command,
+and you wish to add a test which depends on this build, the original build task
+is re-used.
+
+Action Tasks are currently scheduled by
+`pulse_actions <https://github.com/mozilla/pulse_actions>`_. This feature is
+only present on ``try`` pushes for now.
+
+Mach commands
+-------------
+
+A number of mach subcommands are available aside from ``mach taskgraph
+decision`` to make this complex system more accessible to those trying to
+understand or modify it. They allow you to run portions of the
+graph-generation process and output the results.
+
+``mach taskgraph tasks``
+ Get the full task set
+
+``mach taskgraph full``
+ Get the full task graph
+
+``mach taskgraph target``
+ Get the target task set
+
+``mach taskgraph target-graph``
+ Get the target task graph
+
+``mach taskgraph optimized``
+ Get the optimized task graph
+
+Each of these commands takes a ``--parameters`` option giving a file with
+parameters to guide the graph generation. The decision task helpfully produces
+such a file on every run, and that is generally the easiest way to get a
+parameter file. The parameter keys and values are described in
+:doc:`parameters`; using that information, you may modify an existing
+``parameters.yml`` or create your own.
+
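+The same parameters file can also be used programmatically; a sketch based on
+the code in ``taskcluster/mach_commands.py`` (the file name passed here is
+just an example):
+
+.. code-block:: python
+
+    import taskgraph.generator
+    import taskgraph.parameters
+    import taskgraph.target_tasks
+
+    parameters = taskgraph.parameters.load_parameters_file(
+        {'parameters': 'parameters.yml'})
+    parameters.check()
+
+    tgg = taskgraph.generator.TaskGraphGenerator(
+        root_dir='taskcluster/ci',
+        parameters=parameters,
+        target_tasks_method=taskgraph.target_tasks.get_method('all_tasks'))
+
+    for label in tgg.optimized_task_graph.graph.visit_postorder():
+        print(label)
+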
+Task Parameterization
+---------------------
+
+A few components of tasks are only known at the very end of the decision task
+-- just before the ``queue.createTask`` call is made. These are specified
+using simple parameterized values, as follows:
+
+``{"relative-datestamp": "certain number of seconds/hours/days/years"}``
+ Objects of this form will be replaced with an offset from the current time
+ just before the ``queue.createTask`` call is made. For example, an
+ artifact expiration might be specified as ``{"relative-timestamp": "1
+ year"}``.
+
+``{"task-reference": "string containing <dep-name>"}``
+ The task definition may contain "task references" of this form. These will
+ be replaced during the optimization step, with the appropriate taskId for
+ the named dependency substituted for ``<dep-name>`` in the string.
+ Multiple labels may be substituted in a single string, and ``<<>`` can be
+ used to escape a literal ``<``.
+
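+A sketch of how these forms might be resolved; the ``<<>`` escape and full
+datestamp parsing are omitted here:
+
+.. code-block:: python
+
+    import re
+    from datetime import datetime, timedelta
+
+    SECONDS = {'second': 1, 'hour': 3600, 'day': 86400, 'year': 31536000}
+
+    def resolve(value, task_ids):
+        if 'relative-datestamp' in value:
+            count, unit = value['relative-datestamp'].split()
+            delta = timedelta(seconds=int(count) * SECONDS[unit.rstrip('s')])
+            return (datetime.utcnow() + delta).isoformat() + 'Z'
+        if 'task-reference' in value:
+            # replace each "<dep-name>" with that dependency's taskId
+            return re.sub(r'<([^>]+)>',
+                          lambda m: task_ids[m.group(1)],
+                          value['task-reference'])
+        return value
+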
+Taskgraph JSON Format
+---------------------
+
+Task graphs -- both the graph artifacts produced by the decision task and those
+output by the ``--json`` option to the ``mach taskgraph`` commands -- are JSON
+objects, keyed by label, or for optimized task graphs, by taskId. For
+convenience, the decision task also writes out ``label-to-taskid.json``
+containing a mapping from label to taskId. Each task in the graph is
+represented as a JSON object.
+
+Each task has the following properties:
+
+``task_id``
+ The task's taskId (only for optimized task graphs)
+
+``label``
+ The task's label
+
+``attributes``
+ The task's attributes
+
+``dependencies``
+ The task's in-graph dependencies, represented as an object mapping
+ dependency name to label (or to taskId for optimized task graphs)
+
+``task``
+ The task's TaskCluster task definition.
+
+``kind_implementation``
+ The module and class name used to implement this particular task. It is
+ always of the form ``<module-path>:<object-path>``.
+
+The results from each command are in the same format, but with some differences
+in the content:
+
+* The ``tasks`` and ``target`` subcommands both return graphs with no edges.
+ That is, just collections of tasks without any dependencies indicated.
+
+* The ``optimized`` subcommand returns tasks that have been assigned taskIds.
+ The dependencies array, too, contains taskIds instead of labels, with
+ dependencies on optimized tasks omitted. However, the ``task.dependencies``
+ array is populated with the full list of dependency taskIds. All task
+ references are resolved in the optimized graph.
+
+The output of the ``mach taskgraph`` commands is suitable for processing with
+the `jq <https://stedolan.github.io/jq/>`_ utility. For example, to extract all
+tasks' labels and their dependencies:
+
+.. code-block:: shell
+
+ jq 'to_entries | map({label: .value.label, dependencies: .value.dependencies})'
+
diff --git a/taskcluster/docs/transforms.rst b/taskcluster/docs/transforms.rst
new file mode 100644
index 000000000..1679c5589
--- /dev/null
+++ b/taskcluster/docs/transforms.rst
@@ -0,0 +1,198 @@
+Transforms
+==========
+
+Many task kinds generate tasks by a process of transforming job descriptions
+into task definitions. The basic operation is simple, although the sequence of
+transforms applied for a particular kind may not be!
+
+Overview
+--------
+
+To begin, a kind implementation generates a collection of items; see
+:doc:`loading`. The items are simply Python dictionaries, and describe
+"semantically" what the resulting task or tasks should do.
+
+The kind also defines a sequence of transformations. These are applied, in
+order, to each item. Early transforms might apply default values or break
+items up into smaller items (for example, chunking a test suite). Later
+transforms rewrite the items entirely, with the final result being a task
+definition.
+
+Transform Functions
+...................
+
+Each transformation looks like this:
+
+.. code-block:: python
+
+ @transforms.add
+ def transform_an_item(config, items):
+ """This transform ...""" # always a docstring!
+ for item in items:
+ # ..
+ yield item
+
+The ``config`` argument is a Python object containing useful configuration for
+the kind, and is a subclass of
+:class:`taskgraph.transforms.base.TransformConfig`, which specifies a few of
+its attributes. Kinds may subclass and add additional attributes if necessary.
+
+While most transforms yield one item for each item consumed, this is not always
+true: items that are not yielded are effectively filtered out. Yielding
+multiple items for each consumed item implements item duplication; this is how
+test chunking is accomplished, for example.
+
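+For example, a hypothetical chunking transform might look like this (the
+field names are illustrative):
+
+.. code-block:: python
+
+    import copy
+
+    @transforms.add
+    def split_chunks(config, tests):
+        """Duplicate each test into one item per chunk."""
+        for test in tests:
+            if test['chunks'] == 0:
+                continue  # yielding nothing filters the item out
+            for this_chunk in range(1, test['chunks'] + 1):
+                chunked = copy.deepcopy(test)
+                chunked['this-chunk'] = this_chunk
+                yield chunked
+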
+The ``transforms`` object is an instance of
+:class:`taskgraph.transforms.base.TransformSequence`, which serves as a simple
+mechanism to combine a sequence of transforms into one.
+
+Schemas
+.......
+
+The items used in transforms are validated against some simple schemas at
+various points in the transformation process. These schemas accomplish two
+things: they provide a place to add comments about the meaning of each field,
+and they enforce that the fields are actually used in the documented fashion.
+
+Keyed By
+........
+
+Several fields in the input items can be "keyed by" another value in the item.
+For example, a test description's chunks may be keyed by ``test-platform``.
+In the item, this looks like:
+
+.. code-block:: yaml
+
+ chunks:
+ by-test-platform:
+ linux64/debug: 12
+ linux64/opt: 8
+ default: 10
+
+This is a simple but powerful way to encode business rules in the items
+provided as input to the transforms, rather than expressing those rules in the
+transforms themselves. If you are implementing a new business rule, prefer
+this mode where possible. The structure is easily resolved to a single value
+using :func:`taskgraph.transforms.base.get_keyed_by`.
+
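+Resolution behaves roughly like this sketch (the real helper handles more
+cases):
+
+.. code-block:: python
+
+    def get_keyed_by(item, field):
+        """Resolve a ``by-*`` value to a single alternative (simplified)."""
+        value = item[field]
+        if not isinstance(value, dict) or len(value) != 1:
+            return value                       # not keyed-by; use as-is
+        keyed_by = list(value)[0]              # e.g. "by-test-platform"
+        alternatives = value[keyed_by]
+        key = item[keyed_by[len('by-'):]]      # e.g. item["test-platform"]
+        return alternatives.get(key, alternatives['default'])
+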
+Organization
+-------------
+
+Task creation operates broadly in a few phases, with the interfaces of those
+stages defined by schemas. The process begins with the raw data structures
+parsed from the YAML files in the kind configuration. This data can be
+processed by kind-specific transforms resulting, for test jobs, in a "test
+description".
+For non-test jobs, the next step is a "job description". These transformations
+may also "duplicate" tasks, for example to implement chunking or several
+variations of the same task.
+
+In any case, shared transforms then convert this into a "task description",
+which the task-generation transforms then convert into a task definition
+suitable for ``queue.createTask``.
+
+Test Descriptions
+-----------------
+
+The transforms configured for test kinds proceed as follows, based on
+configuration in ``kind.yml``:
+
+ * The test description is validated to conform to the schema in
+ ``taskcluster/taskgraph/transforms/tests/test_description.py``. This schema
+   is extensively documented and is the primary reference for anyone
+ modifying tests.
+
+ * Kind-specific transformations are applied. These may apply default
+ settings, split tests (e.g., one to run with feature X enabled, one with it
+ disabled), or apply across-the-board business rules such as "all desktop
+ debug test platforms should have a max-run-time of 5400s".
+
+ * Transformations generic to all tests are applied. These enforce policies
+   that span multiple kinds, e.g., for treeherder tiers. This is also the
+ place where most values which differ based on platform are resolved, and
+ where chunked tests are split out into a test per chunk.
+
+ * The test is again validated against the same schema. At this point it is
+ still a test description, just with defaults and policies applied, and
+ per-platform options resolved. So transforms up to this point do not modify
+ the "shape" of the test description, and are still governed by the schema in
+ ``test_description.py``.
+
+ * The ``taskgraph.transforms.tests.make_task_description:transforms`` then
+ take the test description and create a *task* description. This transform
+ embodies the specifics of how test runs work: invoking mozharness, various
+ worker options, and so on.
+
+ * Finally, the ``taskgraph.transforms.task:transforms``, described above
+ under "Task-Generation Transforms", are applied.
+
+Test dependencies are produced in the form of a dictionary mapping dependency
+name to task label.
+
+Job Descriptions
+----------------
+
+A job description says what to run in the task. It is a combination of a
+``run`` section and all of the fields from a task description. The run section
+has a ``using`` property that defines how this task should be run; for example,
+``mozharness`` to run a mozharness script, or ``mach`` to run a mach command.
+The remainder of the run section is specific to the run-using implementation.
+
+The effect of a job description is to say "run this thing on this worker". The
+job description must contain enough information about the worker to identify
+the workerType and the implementation (docker-worker, generic-worker, etc.).
+Any other task-description information is passed along verbatim, although it is
+augmented by the run-using implementation.
+
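+Sketched as the Python dictionary the transforms consume, a job description
+might look like the following; the field values are illustrative, not a real
+task:
+
+.. code-block:: python
+
+    job_description = {
+        'label': 'linux64-mochitest-1',
+        'description': 'run a mochitest chunk',
+        'worker-type': 'aws-provisioner-v1/desktop-test',
+        'worker': {
+            'implementation': 'docker-worker',
+            'max-run-time': 3600,
+        },
+        'run': {
+            'using': 'mozharness',         # selects the implementation
+            # ...the rest is specific to the 'using' implementation
+        },
+    }
+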
+The run-using implementations are all located in
+``taskcluster/taskgraph/transforms/job``, along with the schemas for their
+implementations. Those well-commented source files are the canonical
+documentation for what constitutes a job description, and should be considered
+part of the documentation.
+
+Task Descriptions
+-----------------
+
+Every kind needs to create tasks, and all of those tasks have some things in
+common. They all run on one of a small set of worker implementations, each
+with their own idiosyncrasies. And they all report to TreeHerder in a similar
+way.
+
+The transforms in ``taskcluster/taskgraph/transforms/task.py`` implement
+this common functionality. They expect a "task description", and produce a
+task definition. The schema for a task description is defined at the top of
+``task.py``, with copious comments. Go forth and read it now!
+
+In general, the task-description transforms handle functionality that is common
+to all Gecko tasks. While the schema is the definitive reference, the
+functionality includes:
+
+* TreeHerder metadata
+
+* Build index routes
+
+* Information about the projects on which this task should run
+
+* Optimizations
+
+* Defaults for ``expires-after`` and ``deadline-after``, based on project
+
+* Worker configuration
+
+The parts of the task description that are specific to a worker implementation
+are isolated in a ``task_description['worker']`` object which has an
+``implementation`` property naming the worker implementation. Each worker
+implementation has its own section of the schema describing the fields it
+expects. Thus the transforms that produce a task description must be aware of
+the worker implementation to be used, but need not be aware of the details of
+its payload format.
+
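+A sketch of the dispatch this isolation enables; the function names are
+hypothetical:
+
+.. code-block:: python
+
+    def build_payload(task_description, payload_builders):
+        """Pick the payload builder for this worker implementation and
+        delegate to it; the transforms never inspect the payload format."""
+        worker = task_description['worker']
+        build = payload_builders[worker['implementation']]
+        return build(worker)
+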
+The ``task.py`` file also contains a dictionary mapping treeherder group
+symbols to group names. Feel free to add additional groups to this list as
+necessary.
+
+More Detail
+-----------
+
+The source files provide lots of additional detail, both in the code itself and
+in the comments and docstrings. For the next level of detail beyond this file,
+consult the transform source under ``taskcluster/taskgraph/transforms``.
diff --git a/taskcluster/docs/yaml-templates.rst b/taskcluster/docs/yaml-templates.rst
new file mode 100644
index 000000000..515999e60
--- /dev/null
+++ b/taskcluster/docs/yaml-templates.rst
@@ -0,0 +1,49 @@
+Task Definition YAML Templates
+==============================
+
+A few kinds of tasks are described using templated YAML files. These files
+allow some limited forms of inheritance and template substitution as well as
+the usual YAML features, as described below.
+
+Please do not use these features in new kinds. If you are tempted to use
+variable substitution over a YAML file to define tasks, please instead
+implement a new kind-specific transform to accomplish your goal. For example,
+if the current push-id must be included as an argument in
+``task.payload.command``, write a transform function that makes that assignment
+while building a job description, rather than parameterizing that value in the
+input to the transforms.
+
+Inheritance
+-----------
+
+One YAML file can "inherit" from another by including a top-level ``$inherits``
+key. That key specifies the parent file in ``from``, and optionally a
+collection of variables in ``variables``. For example:
+
+.. code-block:: yaml
+
+ $inherits:
+ from: 'tasks/builds/base_linux32.yml'
+ variables:
+ build_name: 'linux32'
+ build_type: 'dbg'
+
+Inheritance proceeds as follows: First, the child document has its template
+substitutions performed and is parsed as YAML. Then, the parent document is
+parsed, with substitutions specified by ``variables`` added to the template
+substitutions. Finally, the child document is merged with the parent.
+
+To merge two JSON objects (dictionaries), each value is merged individually.
+Lists are merged by concatenating the lists from the parent and child
+documents. Atomic values (strings, numbers, etc.) are merged by preferring the
+child document's value.
+
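+A sketch of those merge rules:
+
+.. code-block:: python
+
+    def merge(parent, child):
+        """Merge a child document over its parent (simplified)."""
+        if isinstance(parent, dict) and isinstance(child, dict):
+            merged = dict(parent)
+            for key, value in child.items():
+                merged[key] = (merge(parent[key], value)
+                               if key in parent else value)
+            return merged
+        if isinstance(parent, list) and isinstance(child, list):
+            return parent + child          # lists concatenate
+        return child                       # atomic values: child wins
+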
+Substitution
+------------
+
+Each document is expanded using the PyStache template engine before it is
+parsed as YAML. The parameters for this expansion are specific to the task
+kind.
+
+Simple value substitution looks like ``{{variable}}``. Function calls look
+like ``{{#function}}argument{{/function}}``.
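+
+For example, a sketch using the ``pystache`` API directly, with the variables
+from the inheritance example above:
+
+.. code-block:: python
+
+    import pystache
+
+    # Simple value substitution; function calls use Mustache lambdas.
+    print(pystache.render('build/{{build_name}}-{{build_type}}',
+                          {'build_name': 'linux32', 'build_type': 'dbg'}))
+    # -> build/linux32-dbg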
diff --git a/taskcluster/mach_commands.py b/taskcluster/mach_commands.py
new file mode 100644
index 000000000..b5515db14
--- /dev/null
+++ b/taskcluster/mach_commands.py
@@ -0,0 +1,290 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import json
+import logging
+import sys
+import traceback
+
+from mach.decorators import (
+ CommandArgument,
+ CommandProvider,
+ Command,
+ SubCommand,
+)
+
+from mozbuild.base import MachCommandBase
+
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+
+
+class ShowTaskGraphSubCommand(SubCommand):
+ """A SubCommand with TaskGraph-specific arguments"""
+
+ def __call__(self, func):
+ after = SubCommand.__call__(self, func)
+ args = [
+ CommandArgument('--root', '-r', default='taskcluster/ci',
+ help="root of the taskgraph definition relative to topsrcdir"),
+ CommandArgument('--quiet', '-q', action="store_true",
+ help="suppress all logging output"),
+ CommandArgument('--verbose', '-v', action="store_true",
+ help="include debug-level logging output"),
+ CommandArgument('--json', '-J', action="store_const",
+ dest="format", const="json",
+ help="Output task graph as a JSON object"),
+ CommandArgument('--labels', '-L', action="store_const",
+ dest="format", const="labels",
+ help="Output the label for each task in the task graph (default)"),
+ CommandArgument('--parameters', '-p', required=True,
+ help="parameters file (.yml or .json; see "
+                            "`taskcluster/docs/parameters.rst`)"),
+ CommandArgument('--no-optimize', dest="optimize", action="store_false",
+                            default=True,
+ help="do not remove tasks from the graph that are found in the "
+ "index (a.k.a. optimize the graph)"),
+ ]
+ for arg in args:
+ after = arg(after)
+ return after
+
+
+@CommandProvider
+class MachCommands(MachCommandBase):
+
+ @Command('taskgraph', category="ci",
+ description="Manipulate TaskCluster task graphs defined in-tree")
+ def taskgraph(self):
+ """The taskgraph subcommands all relate to the generation of task graphs
+ for Gecko continuous integration. A task graph is a set of tasks linked
+ by dependencies: for example, a binary must be built before it is tested,
+ and that build may further depend on various toolchains, libraries, etc.
+ """
+
+ @SubCommand('taskgraph', 'python-tests',
+ description='Run the taskgraph unit tests')
+ def taskgraph_python_tests(self, **options):
+ import unittest
+ import mozunit
+ suite = unittest.defaultTestLoader.discover('taskgraph.test')
+ runner = mozunit.MozTestRunner(verbosity=2)
+ result = runner.run(suite)
+ if not result.wasSuccessful():
+ sys.exit(1)
+
+ @ShowTaskGraphSubCommand('taskgraph', 'tasks',
+ description="Show all tasks in the taskgraph")
+ def taskgraph_tasks(self, **options):
+ return self.show_taskgraph('full_task_set', options)
+
+ @ShowTaskGraphSubCommand('taskgraph', 'full',
+ description="Show the full taskgraph")
+ def taskgraph_full(self, **options):
+ return self.show_taskgraph('full_task_graph', options)
+
+ @ShowTaskGraphSubCommand('taskgraph', 'target',
+ description="Show the target task set")
+ def taskgraph_target(self, **options):
+ return self.show_taskgraph('target_task_set', options)
+
+ @ShowTaskGraphSubCommand('taskgraph', 'target-graph',
+ description="Show the target taskgraph")
+ def taskgraph_target_taskgraph(self, **options):
+ return self.show_taskgraph('target_task_graph', options)
+
+ @ShowTaskGraphSubCommand('taskgraph', 'optimized',
+ description="Show the optimized taskgraph")
+ def taskgraph_optimized(self, **options):
+ return self.show_taskgraph('optimized_task_graph', options)
+
+ @SubCommand('taskgraph', 'decision',
+ description="Run the decision task")
+ @CommandArgument('--root', '-r',
+ default='taskcluster/ci',
+ help="root of the taskgraph definition relative to topsrcdir")
+ @CommandArgument('--base-repository',
+ required=True,
+ help='URL for "base" repository to clone')
+ @CommandArgument('--head-repository',
+ required=True,
+ help='URL for "head" repository to fetch revision from')
+ @CommandArgument('--head-ref',
+ required=True,
+ help='Reference (this is same as rev usually for hg)')
+ @CommandArgument('--head-rev',
+ required=True,
+ help='Commit revision to use from head repository')
+ @CommandArgument('--message',
+ required=True,
+ help='Commit message to be parsed. Example: "try: -b do -p all -u all"')
+ @CommandArgument('--revision-hash',
+ required=True,
+ help='Treeherder revision hash (long revision id) to attach results to')
+ @CommandArgument('--project',
+ required=True,
+ help='Project to use for creating task graph. Example: --project=try')
+ @CommandArgument('--pushlog-id',
+ dest='pushlog_id',
+ required=True,
+ default=0)
+ @CommandArgument('--pushdate',
+ dest='pushdate',
+ required=True,
+ type=int,
+ default=0)
+ @CommandArgument('--owner',
+ required=True,
+ help='email address of who owns this graph')
+ @CommandArgument('--level',
+ required=True,
+ help='SCM level of this repository')
+ @CommandArgument('--triggered-by',
+ choices=['nightly', 'push'],
+ default='push',
+ help='Source of execution of the decision graph')
+ @CommandArgument('--target-tasks-method',
+ help='method for selecting the target tasks to generate')
+ def taskgraph_decision(self, **options):
+ """Run the decision task: generate a task graph and submit to
+ TaskCluster. This is only meant to be called within decision tasks,
+ and requires a great many arguments. Commands like `mach taskgraph
+ optimized` are better suited to use on the command line, and can take
+ the parameters file generated by a decision task. """
+
+ import taskgraph.decision
+ try:
+ self.setup_logging()
+ return taskgraph.decision.taskgraph_decision(options)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+ @SubCommand('taskgraph', 'action-task',
+ description="Run the action task")
+ @CommandArgument('--root', '-r',
+ default='taskcluster/ci',
+ help="root of the taskgraph definition relative to topsrcdir")
+ @CommandArgument('--decision-id',
+ required=True,
+ help="Decision Task ID of the reference decision task")
+ @CommandArgument('--task-labels',
+ required=True,
+ help='Comma separated list of task labels to be scheduled')
+ def taskgraph_action(self, **options):
+ """Run the action task: Generates a task graph using the set of labels
+ provided in the task-labels parameter. It uses the full-task file of
+ the gecko decision task."""
+
+ import taskgraph.action
+ try:
+ self.setup_logging()
+ return taskgraph.action.taskgraph_action(options)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+ def setup_logging(self, quiet=False, verbose=True):
+ """
+ Set up Python logging for all loggers, sending results to stderr (so
+ that command output can be redirected easily) and adding the typical
+ mach timestamp.
+ """
+ # remove the old terminal handler
+ old = self.log_manager.replace_terminal_handler(None)
+
+ # re-add it, with level and fh set appropriately
+ if not quiet:
+ level = logging.DEBUG if verbose else logging.INFO
+ self.log_manager.add_terminal_logging(
+ fh=sys.stderr, level=level,
+ write_interval=old.formatter.write_interval,
+ write_times=old.formatter.write_times)
+
+ # all of the taskgraph logging is unstructured logging
+ self.log_manager.enable_unstructured()
+
+ def show_taskgraph(self, graph_attr, options):
+ import taskgraph.parameters
+ import taskgraph.target_tasks
+ import taskgraph.generator
+
+ try:
+ self.setup_logging(quiet=options['quiet'], verbose=options['verbose'])
+ parameters = taskgraph.parameters.load_parameters_file(options)
+ parameters.check()
+
+ target_tasks_method = parameters.get('target_tasks_method', 'all_tasks')
+ target_tasks_method = taskgraph.target_tasks.get_method(target_tasks_method)
+ tgg = taskgraph.generator.TaskGraphGenerator(
+ root_dir=options['root'],
+ parameters=parameters,
+ target_tasks_method=target_tasks_method)
+
+ tg = getattr(tgg, graph_attr)
+
+ show_method = getattr(self, 'show_taskgraph_' + (options['format'] or 'labels'))
+ show_method(tg)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+ def show_taskgraph_labels(self, taskgraph):
+ for label in taskgraph.graph.visit_postorder():
+ print(label)
+
+ def show_taskgraph_json(self, taskgraph):
+ print(json.dumps(taskgraph.to_json(),
+ sort_keys=True, indent=2, separators=(',', ': ')))
+
+
+@CommandProvider
+class TaskClusterImagesProvider(object):
+ @Command('taskcluster-load-image', category="ci",
+ description="Load a pre-built Docker image")
+ @CommandArgument('--task-id',
+                     help="Load the image at public/image.tar in this task, "
+                          "rather than searching the index")
+ @CommandArgument('image_name', nargs='?',
+                     help="Load the image of this name based on the current "
+                          "contents of the tree (as built for mozilla-central "
+                          "or mozilla-inbound)")
+ def load_image(self, image_name, task_id):
+ from taskgraph.docker import load_image_by_name, load_image_by_task_id
+ if not image_name and not task_id:
+ print("Specify either IMAGE-NAME or TASK-ID")
+ sys.exit(1)
+ try:
+ if task_id:
+ ok = load_image_by_task_id(task_id)
+ else:
+ ok = load_image_by_name(image_name)
+ if not ok:
+ sys.exit(1)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+ @Command('taskcluster-build-image', category='ci',
+ description='Build a Docker image')
+ @CommandArgument('image_name',
+ help='Name of the image to build')
+ @CommandArgument('--context-only',
+                     help="File name the context tarball should be written to; "
+                          "with this option it will only build the context.tar.",
+ metavar='context.tar')
+ def build_image(self, image_name, context_only):
+ from taskgraph.docker import build_image, build_context
+ try:
+ if context_only is None:
+ build_image(image_name)
+ else:
+ build_context(image_name, context_only)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
diff --git a/taskcluster/moz.build b/taskcluster/moz.build
new file mode 100644
index 000000000..d1e280706
--- /dev/null
+++ b/taskcluster/moz.build
@@ -0,0 +1,7 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SPHINX_TREES['taskcluster'] = 'docs'
diff --git a/taskcluster/scripts/builder/build-haz-linux.sh b/taskcluster/scripts/builder/build-haz-linux.sh
new file mode 100755
index 000000000..1d5ef52ba
--- /dev/null
+++ b/taskcluster/scripts/builder/build-haz-linux.sh
@@ -0,0 +1,89 @@
+#!/bin/bash -ex
+
+function usage() {
+ echo "Usage: $0 [--project <shell|browser>] <workspace-dir> flags..."
+ echo "flags are treated the same way as a commit message would be"
+ echo "(as in, they are scanned for directives just like a try: ... line)"
+}
+
+PROJECT=shell
+WORKSPACE=
+DO_TOOLTOOL=1
+while [[ $# -gt 0 ]]; do
+ if [[ "$1" == "-h" ]] || [[ "$1" == "--help" ]]; then
+ usage
+ exit 0
+ elif [[ "$1" == "--project" ]]; then
+ shift
+ PROJECT="$1"
+ shift
+ elif [[ "$1" == "--no-tooltool" ]]; then
+ shift
+ DO_TOOLTOOL=
+ elif [[ -z "$WORKSPACE" ]]; then
+ WORKSPACE=$( cd "$1" && pwd )
+ shift
+ break
+ fi
+done
+
+SCRIPT_FLAGS="$@"
+
+# Ensure all the scripts in this dir are on the path....
+DIRNAME=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+PATH=$DIRNAME:$PATH
+
+# Use GECKO_BASE_REPOSITORY as a signal for whether we are running in automation.
+export AUTOMATION=${GECKO_BASE_REPOSITORY:+1}
+
+: ${GECKO_DIR:=$WORKSPACE/gecko}
+: ${TOOLTOOL_MANIFEST:=browser/config/tooltool-manifests/linux64/hazard.manifest}
+: ${TOOLTOOL_CACHE:=$WORKSPACE/tt-cache}
+
+if ! [ -d $GECKO_DIR ]; then
+ echo "GECKO_DIR must be set to a directory containing a gecko source checkout" >&2
+ exit 1
+fi
+GECKO_DIR=$( cd "$GECKO_DIR" && pwd )
+
+# Directory to populate with tooltool-installed tools
+export TOOLTOOL_DIR="$WORKSPACE"
+
+# Directory to hold the (useless) object files generated by the analysis.
+export MOZ_OBJDIR="$WORKSPACE/obj-analyzed"
+mkdir -p "$MOZ_OBJDIR"
+
+if [ -n "$DO_TOOLTOOL" ]; then
+ ( cd $TOOLTOOL_DIR; python $GECKO_DIR/testing/docker/recipes/tooltool.py --url https://api.pub.build.mozilla.org/tooltool/ -m $GECKO_DIR/$TOOLTOOL_MANIFEST fetch -c $TOOLTOOL_CACHE )
+fi
+
+export NO_MERCURIAL_SETUP_CHECK=1
+
+if [[ "$PROJECT" = "browser" ]]; then (
+ cd "$WORKSPACE"
+  # setup-ccache.sh reads the workspace path from $1
+  set "$WORKSPACE"
+ . setup-ccache.sh
+ # Mozbuild config:
+ export MOZBUILD_STATE_PATH=$WORKSPACE/mozbuild/
+ # Create .mozbuild so mach doesn't complain about this
+ mkdir -p $MOZBUILD_STATE_PATH
+) fi
+. hazard-analysis.sh
+
+build_js_shell
+
+# Artifacts folder is outside of the cache.
+mkdir -p $HOME/artifacts/ || true
+
+function onexit () {
+ grab_artifacts "$WORKSPACE/analysis" "$HOME/artifacts"
+}
+
+trap onexit EXIT
+
+configure_analysis "$WORKSPACE/analysis"
+run_analysis "$WORKSPACE/analysis" "$PROJECT"
+
+check_hazards "$WORKSPACE/analysis"
+
+################################### script end ###################################
diff --git a/taskcluster/scripts/builder/build-l10n.sh b/taskcluster/scripts/builder/build-l10n.sh
new file mode 100755
index 000000000..be16955a5
--- /dev/null
+++ b/taskcluster/scripts/builder/build-l10n.sh
@@ -0,0 +1,98 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+. /home/worker/scripts/xvfb.sh
+
+####
+# Taskcluster friendly wrapper for performing fx desktop l10n repacks via mozharness.
+# Based on ./build-linux.sh
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: MOZHARNESS_ACTIONS ${MOZHARNESS_ACTIONS}
+: MOZHARNESS_OPTIONS ${MOZHARNESS_OPTIONS}
+
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/home/worker/tooltool-cache}
+
+: NEED_XVFB ${NEED_XVFB:=false}
+
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+
+set -v
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[build-l10n.sh:error]" "${@}"
+ exit 1
+}
+
+export MOZ_CRASHREPORTER_NO_REPORT=1
+export MOZ_OBJDIR=obj-firefox
+export TINDERBOX_OUTPUT=1
+
+# Ensure that in tree libraries can be found
+export LIBRARY_PATH=$LIBRARY_PATH:$WORKSPACE/src/obj-firefox:$WORKSPACE/src/gcc/lib64
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
+
+cleanup() {
+ local rv=$?
+ cleanup_xvfb
+ exit $rv
+}
+trap cleanup EXIT INT
+
+# run Xvfb in the background, if necessary
+if $NEED_XVFB; then
+ start_xvfb '1024x768x24' 2
+fi
+
+# set up mozharness configuration, via command line, env, etc.
+
+# $TOOLTOOL_CACHE bypasses mozharness completely and is read by tooltool_wrapper.sh to set the
+# cache. However, only some mozharness scripts use tooltool_wrapper.sh, so this may not be
+# entirely effective.
+export TOOLTOOL_CACHE
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config ${cfg}"
+done
+
+# if MOZHARNESS_ACTIONS is given, only run those actions (completely overriding default_actions
+# in the mozharness configuration)
+if [ -n "$MOZHARNESS_ACTIONS" ]; then
+ actions=""
+ for action in $MOZHARNESS_ACTIONS; do
+ actions="$actions --$action"
+ done
+fi
+
+# if MOZHARNESS_OPTIONS is given, append them to mozharness command line run
+# e.g. enable-pgo
+if [ -n "$MOZHARNESS_OPTIONS" ]; then
+ options=""
+ for option in $MOZHARNESS_OPTIONS; do
+ options="$options --$option"
+ done
+fi
+
+cd /home/worker
+
+python2.7 $WORKSPACE/build/src/testing/${MOZHARNESS_SCRIPT} \
+ --disable-mock \
+ --revision ${GECKO_HEAD_REV} \
+ $actions \
+ $options \
+ ${config_cmds} \
+ --log-level=debug \
+    --work-dir=$WORKSPACE/build
diff --git a/taskcluster/scripts/builder/build-linux.sh b/taskcluster/scripts/builder/build-linux.sh
new file mode 100755
index 000000000..8885abdec
--- /dev/null
+++ b/taskcluster/scripts/builder/build-linux.sh
@@ -0,0 +1,122 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+. /home/worker/scripts/xvfb.sh
+
+####
+# Taskcluster friendly wrapper for performing fx desktop builds via mozharness.
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: MOZHARNESS_ACTIONS ${MOZHARNESS_ACTIONS}
+: MOZHARNESS_OPTIONS ${MOZHARNESS_OPTIONS}
+
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/home/worker/tooltool-cache}
+
+: NEED_XVFB ${NEED_XVFB:=false}
+
+: MH_CUSTOM_BUILD_VARIANT_CFG ${MH_CUSTOM_BUILD_VARIANT_CFG}
+: MH_BRANCH ${MH_BRANCH:=mozilla-central}
+: MH_BUILD_POOL ${MH_BUILD_POOL:=staging}
+: MOZ_SCM_LEVEL ${MOZ_SCM_LEVEL:=1}
+
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+
+set -v
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[build-linux.sh:error]" "${@}"
+ exit 1
+}
+
+export MOZ_CRASHREPORTER_NO_REPORT=1
+export MOZ_OBJDIR=obj-firefox
+export TINDERBOX_OUTPUT=1
+
+# use "simple" package names so that they can be hard-coded in the task's
+# extras.locations
+export MOZ_SIMPLE_PACKAGE_NAME=target
+
+# Do not try to upload symbols (see https://bugzilla.mozilla.org/show_bug.cgi?id=1164615)
+export MOZ_AUTOMATION_UPLOAD_SYMBOLS=0
+
+# Ensure that in tree libraries can be found
+export LIBRARY_PATH=$LIBRARY_PATH:$WORKSPACE/src/obj-firefox:$WORKSPACE/src/gcc/lib64
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
+
+cleanup() {
+ local rv=$?
+ cleanup_xvfb
+ exit $rv
+}
+trap cleanup EXIT INT
+
+# run Xvfb in the background, if necessary
+if $NEED_XVFB; then
+ start_xvfb '1024x768x24' 2
+fi
+
+# set up mozharness configuration, via command line, env, etc.
+
+debug_flag=""
+if [ 0$DEBUG -ne 0 ]; then
+ debug_flag='--debug'
+fi
+
+custom_build_variant_cfg_flag=""
+if [ -n "${MH_CUSTOM_BUILD_VARIANT_CFG}" ]; then
+ custom_build_variant_cfg_flag="--custom-build-variant-cfg=${MH_CUSTOM_BUILD_VARIANT_CFG}"
+fi
+
+# $TOOLTOOL_CACHE bypasses mozharness completely and is read by tooltool_wrapper.sh to set the
+# cache. However, only some mozharness scripts use tooltool_wrapper.sh, so this may not be
+# entirely effective.
+export TOOLTOOL_CACHE
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config ${cfg}"
+done
+
+# if MOZHARNESS_ACTIONS is given, only run those actions (completely overriding default_actions
+# in the mozharness configuration)
+if [ -n "$MOZHARNESS_ACTIONS" ]; then
+ actions=""
+ for action in $MOZHARNESS_ACTIONS; do
+ actions="$actions --$action"
+ done
+fi
+
+# if MOZHARNESS_OPTIONS is given, append them to mozharness command line run
+# e.g. enable-pgo
+if [ -n "$MOZHARNESS_OPTIONS" ]; then
+ options=""
+ for option in $MOZHARNESS_OPTIONS; do
+ options="$options --$option"
+ done
+fi
+
+cd /home/worker
+
+python2.7 $WORKSPACE/build/src/testing/${MOZHARNESS_SCRIPT} ${config_cmds} \
+ $debug_flag \
+ $custom_build_variant_cfg_flag \
+ --disable-mock \
+ $actions \
+ $options \
+ --log-level=debug \
+ --scm-level=$MOZ_SCM_LEVEL \
+ --work-dir=$WORKSPACE/build \
+ --branch=${MH_BRANCH} \
+ --build-pool=${MH_BUILD_POOL}
diff --git a/taskcluster/scripts/builder/build-sm-mozjs-crate.sh b/taskcluster/scripts/builder/build-sm-mozjs-crate.sh
new file mode 100755
index 000000000..09c353084
--- /dev/null
+++ b/taskcluster/scripts/builder/build-sm-mozjs-crate.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -xe
+
+source $(dirname $0)/sm-tooltool-config.sh
+
+# Ensure that we have a .config/cargo that points us to our vendored crates
+# rather than to crates.io.
+cd "$SRCDIR/.cargo"
+sed -e "s|@top_srcdir@|$SRCDIR|" < config.in | tee config
+
+cd "$SRCDIR/js/src"
+
+export PATH="$PATH:$TOOLTOOL_CHECKOUT/cargo/bin:$TOOLTOOL_CHECKOUT/rustc/bin"
+export RUST_BACKTRACE=1
+
+cargo build --verbose --frozen --features debugmozjs
+cargo build --verbose --frozen
diff --git a/taskcluster/scripts/builder/build-sm-package.sh b/taskcluster/scripts/builder/build-sm-package.sh
new file mode 100755
index 000000000..6bb819f26
--- /dev/null
+++ b/taskcluster/scripts/builder/build-sm-package.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -xe
+
+source $(dirname $0)/sm-tooltool-config.sh
+
+mkdir -p $UPLOAD_DIR
+
+# Package up the sources into the release tarball.
+AUTOMATION=1 DIST=$UPLOAD_DIR $SRCDIR/js/src/make-source-package.sh
+
+# Extract the tarball into a new directory in the workspace.
+
+PACKAGE_DIR=$WORK/sm-package
+mkdir -p $PACKAGE_DIR
+pushd $PACKAGE_DIR
+
+tar -xjvf $UPLOAD_DIR/mozjs-*.tar.bz2
+
+: ${PYTHON:=python2.7}
+
+# Build the freshly extracted, packaged SpiderMonkey.
+pushd ./mozjs-*/js/src
+AUTOMATION=1 $PYTHON ./devtools/automation/autospider.py --skip-tests=checks $SPIDERMONKEY_VARIANT
+popd
+
+# Copy artifacts for upload by TaskCluster
+cp -rL ./mozjs-*/obj-spider/dist/bin/{js,jsapi-tests,js-gdb.py,libmozjs*} $UPLOAD_DIR
diff --git a/taskcluster/scripts/builder/build-sm.sh b/taskcluster/scripts/builder/build-sm.sh
new file mode 100755
index 000000000..d61a7a81c
--- /dev/null
+++ b/taskcluster/scripts/builder/build-sm.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -x
+
+source $(dirname $0)/sm-tooltool-config.sh
+
+: ${PYTHON:=python2.7}
+
+# Run the script
+export MOZ_UPLOAD_DIR="$UPLOAD_DIR"
+AUTOMATION=1 $PYTHON $SRCDIR/js/src/devtools/automation/autospider.py $SPIDERMONKEY_VARIANT
+BUILD_STATUS=$?
+
+# Ensure upload dir exists
+mkdir -p $UPLOAD_DIR
+
+# Copy artifacts for upload by TaskCluster
+cp -rL $SRCDIR/obj-spider/dist/bin/{js,jsapi-tests,js-gdb.py} $UPLOAD_DIR
+
+exit $BUILD_STATUS
diff --git a/taskcluster/scripts/builder/desktop-setup.sh b/taskcluster/scripts/builder/desktop-setup.sh
new file mode 100755
index 000000000..4b74a1201
--- /dev/null
+++ b/taskcluster/scripts/builder/desktop-setup.sh
@@ -0,0 +1,24 @@
+#!/bin/bash -ex
+
+test $MOZCONFIG # mozconfig is required...
+test -d $1 # workspace must exist at this point...
+WORKSPACE=$( cd "$1" && pwd )
+
+. setup-ccache.sh
+
+# Gecko source:
+export GECKO_DIR=$WORKSPACE/gecko
+# Gaia source:
+export GAIA_DIR=$WORKSPACE/gaia
+# Mozbuild config:
+export MOZBUILD_STATE_PATH=$WORKSPACE/mozbuild/
+
+# Create .mozbuild so mach doesn't complain about this
+mkdir -p $MOZBUILD_STATE_PATH
+
+### Install package dependencies
+install-packages.sh ${TOOLTOOL_DIR:-$GECKO_DIR}
+
+# Ensure object-folder exists
+export MOZ_OBJDIR=$WORKSPACE/object-folder/
+mkdir -p $MOZ_OBJDIR
diff --git a/taskcluster/scripts/builder/get-objdir.py b/taskcluster/scripts/builder/get-objdir.py
new file mode 100755
index 000000000..132e20d4f
--- /dev/null
+++ b/taskcluster/scripts/builder/get-objdir.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python2.7
+
+from __future__ import print_function
+import sys
+import os
+import json
+import subprocess
+from StringIO import StringIO
+
+gecko_dir = sys.argv[1]
+os.chdir(gecko_dir)
+
+result = subprocess.check_output(["./mach", "environment", "--format", "json"])
+environment = json.load(StringIO(result))
+
+topobjdir = environment["mozconfig"]["topobjdir"]
+if topobjdir is None:
+ topobjdir = sys.argv[2]
+
+print(topobjdir)
diff --git a/taskcluster/scripts/builder/hazard-analysis.sh b/taskcluster/scripts/builder/hazard-analysis.sh
new file mode 100755
index 000000000..d3e574742
--- /dev/null
+++ b/taskcluster/scripts/builder/hazard-analysis.sh
@@ -0,0 +1,149 @@
+#!/bin/bash -ex
+
+[ -n "$WORKSPACE" ]
+[ -n "$MOZ_OBJDIR" ]
+[ -n "$GECKO_DIR" ]
+
+HAZARD_SHELL_OBJDIR=$WORKSPACE/obj-haz-shell
+JS_SRCDIR=$GECKO_DIR/js/src
+ANALYSIS_SRCDIR=$JS_SRCDIR/devtools/rootAnalysis
+
+export CC="$TOOLTOOL_DIR/gcc/bin/gcc"
+export CXX="$TOOLTOOL_DIR/gcc/bin/g++"
+
+PYTHON=python2.7
+if ! which $PYTHON; then
+ PYTHON=python
+fi
+
+
+function check_commit_msg () {
+ ( set +e;
+ if [[ -n "$AUTOMATION" ]]; then
+ hg --cwd "$GECKO_DIR" log -r. --template '{desc}\n' | grep -F -q -- "$1"
+ else
+ echo -- "$SCRIPT_FLAGS" | grep -F -q -- "$1"
+ fi
+ )
+}
+
+if check_commit_msg "--dep"; then
+ HAZ_DEP=1
+fi
+
+function build_js_shell () {
+ # Must unset MOZ_OBJDIR and MOZCONFIG here to prevent the build system from
+ # inferring that the analysis output directory is the current objdir. We
+ # need a separate objdir here to build the opt JS shell to use to run the
+ # analysis.
+ (
+ unset MOZ_OBJDIR
+ unset MOZCONFIG
+ ( cd $JS_SRCDIR; autoconf-2.13 )
+ if [[ -z "$HAZ_DEP" ]]; then
+ [ -d $HAZARD_SHELL_OBJDIR ] && rm -rf $HAZARD_SHELL_OBJDIR
+ fi
+ mkdir -p $HAZARD_SHELL_OBJDIR || true
+ cd $HAZARD_SHELL_OBJDIR
+ $JS_SRCDIR/configure --enable-optimize --disable-debug --enable-ctypes --enable-nspr-build --without-intl-api --with-ccache
+ make -j4
+ ) # Restore MOZ_OBJDIR and MOZCONFIG
+}
+
+function configure_analysis () {
+ local analysis_dir
+ analysis_dir="$1"
+
+ if [[ -z "$HAZ_DEP" ]]; then
+ [ -d "$analysis_dir" ] && rm -rf "$analysis_dir"
+ fi
+
+ mkdir -p "$analysis_dir" || true
+ (
+ cd "$analysis_dir"
+ cat > defaults.py <<EOF
+js = "$HAZARD_SHELL_OBJDIR/dist/bin/js"
+analysis_scriptdir = "$ANALYSIS_SRCDIR"
+objdir = "$MOZ_OBJDIR"
+source = "$GECKO_DIR"
+sixgill = "$TOOLTOOL_DIR/sixgill/usr/libexec/sixgill"
+sixgill_bin = "$TOOLTOOL_DIR/sixgill/usr/bin"
+EOF
+
+ cat > run-analysis.sh <<EOF
+#!/bin/sh
+if [ \$# -eq 0 ]; then
+ set gcTypes
+fi
+export ANALYSIS_SCRIPTDIR="$ANALYSIS_SRCDIR"
+exec "$ANALYSIS_SRCDIR/analyze.py" "\$@"
+EOF
+ chmod +x run-analysis.sh
+ )
+}
+
+function run_analysis () {
+ local analysis_dir
+ analysis_dir="$1"
+ local build_type
+ build_type="$2"
+
+ if [[ -z "$HAZ_DEP" ]]; then
+ [ -d $MOZ_OBJDIR ] && rm -rf $MOZ_OBJDIR
+ fi
+
+ (
+ cd "$analysis_dir"
+ $PYTHON "$ANALYSIS_SRCDIR/analyze.py" --buildcommand="$GECKO_DIR/testing/mozharness/scripts/spidermonkey/build.${build_type}"
+ )
+}
+
+function grab_artifacts () {
+ local analysis_dir
+ analysis_dir="$1"
+ local artifacts
+ artifacts="$2"
+
+ (
+ cd "$analysis_dir"
+ ls -lah
+
+ # Do not error out if no files found
+ shopt -s nullglob
+ set +e
+ for f in *.txt *.lst; do
+ gzip -9 -c "$f" > "${artifacts}/$f.gz"
+ done
+
+ # Check whether the user requested .xdb file upload in the top commit comment
+ if check_commit_msg "--upload-xdbs"; then
+ HAZ_UPLOAD_XDBS=1
+ fi
+
+ if [ -n "$HAZ_UPLOAD_XDBS" ]; then
+ for f in *.xdb; do
+ bzip2 -c "$f" > "${artifacts}/$f.bz2"
+ done
+ fi
+ )
+}
+
+function check_hazards () {
+ (
+ set +e
+ NUM_HAZARDS=$(grep -c 'Function.*has unrooted.*live across GC call' "$1"/rootingHazards.txt)
+ NUM_UNSAFE=$(grep -c '^Function.*takes unsafe address of unrooted' "$1"/refs.txt)
+ NUM_UNNECESSARY=$(grep -c '^Function.* has unnecessary root' "$1"/unnecessary.txt)
+
+ set +x
+ echo "TinderboxPrint: rooting hazards<br/>$NUM_HAZARDS"
+ echo "TinderboxPrint: unsafe references to unrooted GC pointers<br/>$NUM_UNSAFE"
+ echo "TinderboxPrint: unnecessary roots<br/>$NUM_UNNECESSARY"
+
+ if [ $NUM_HAZARDS -gt 0 ]; then
+ echo "TEST-UNEXPECTED-FAIL $NUM_HAZARDS hazards detected" >&2
+ echo "TinderboxPrint: documentation<br/><a href='https://wiki.mozilla.org/Javascript:Hazard_Builds'>static rooting hazard analysis failures</a>, visit \"Inspect Task\" link for hazard details"
+ exit 1
+ fi
+ )
+}
diff --git a/taskcluster/scripts/builder/install-packages.sh b/taskcluster/scripts/builder/install-packages.sh
new file mode 100755
index 000000000..2f5cdf489
--- /dev/null
+++ b/taskcluster/scripts/builder/install-packages.sh
@@ -0,0 +1,13 @@
+#!/bin/bash -vex
+
+gecko_dir=$1
+test -d $gecko_dir
+test -n "$TOOLTOOL_CACHE"
+test -n "$TOOLTOOL_MANIFEST"
+test -n "$TOOLTOOL_REPO"
+test -n "$TOOLTOOL_REV"
+
+tc-vcs checkout $gecko_dir/tooltool $TOOLTOOL_REPO $TOOLTOOL_REPO $TOOLTOOL_REV
+
+(cd $gecko_dir; python $gecko_dir/tooltool/tooltool.py --url https://api.pub.build.mozilla.org/tooltool/ -m $gecko_dir/$TOOLTOOL_MANIFEST fetch -c $TOOLTOOL_CACHE)
+
diff --git a/taskcluster/scripts/builder/setup-ccache.sh b/taskcluster/scripts/builder/setup-ccache.sh
new file mode 100644
index 000000000..3c03b2640
--- /dev/null
+++ b/taskcluster/scripts/builder/setup-ccache.sh
@@ -0,0 +1,9 @@
+#! /bin/bash -ex
+
+test -d $1 # workspace must exist at this point...
+WORKSPACE=$( cd "$1" && pwd )
+
+export CCACHE_DIR=$WORKSPACE/ccache
+
+ccache -M 12G
+ccache -s
diff --git a/taskcluster/scripts/builder/sm-tooltool-config.sh b/taskcluster/scripts/builder/sm-tooltool-config.sh
new file mode 100755
index 000000000..b6a062858
--- /dev/null
+++ b/taskcluster/scripts/builder/sm-tooltool-config.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+set -xe
+
+: ${TOOLTOOL_SERVER:=https://api.pub.build.mozilla.org/tooltool/}
+: ${SPIDERMONKEY_VARIANT:=plain}
+: ${UPLOAD_DIR:=$HOME/artifacts/}
+: ${WORK:=$HOME/workspace}
+: ${SRCDIR:=$WORK/build/src}
+
+mkdir -p $WORK
+cd $WORK
+
+# Need to install things from tooltool. Figure out what platform to use.
+
+case $(uname -m) in
+ i686 | arm )
+ BITS=32
+ ;;
+ *)
+ BITS=64
+ ;;
+esac
+
+case "$OSTYPE" in
+ darwin*)
+ PLATFORM_OS=macosx
+ ;;
+ linux-gnu)
+ PLATFORM_OS=linux
+ ;;
+ msys)
+ PLATFORM_OS=win
+ ;;
+ *)
+ echo "Unrecognized OSTYPE '$OSTYPE'" >&2
+ PLATFORM_OS=linux
+ ;;
+esac
+
+# Install everything needed for the browser on this platform. Not all of it is
+# necessary for the JS shell, but it's less duplication to share tooltool
+# manifests.
+BROWSER_PLATFORM=$PLATFORM_OS$BITS
+: ${TOOLTOOL_MANIFEST:=browser/config/tooltool-manifests/$BROWSER_PLATFORM/releng.manifest}
+
+: ${TOOLTOOL_CHECKOUT:=$WORK}
+export TOOLTOOL_CHECKOUT
+
+(cd $TOOLTOOL_CHECKOUT && python ${SRCDIR}/testing/docker/recipes/tooltool.py --url $TOOLTOOL_SERVER -m $SRCDIR/$TOOLTOOL_MANIFEST fetch ${TOOLTOOL_CACHE:+ -c $TOOLTOOL_CACHE})
diff --git a/taskcluster/scripts/copy.sh b/taskcluster/scripts/copy.sh
new file mode 100755
index 000000000..931145a3b
--- /dev/null
+++ b/taskcluster/scripts/copy.sh
@@ -0,0 +1,9 @@
+#! /bin/bash -ex
+
+# This script copies the contents of the "scripts" folder into a docker
+# container using tar/untar; the container id ($1) and destination
+# directory ($2) must be passed.
+
+DIRNAME=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+docker exec $1 mkdir -p $2
+cd $DIRNAME
+tar -cv * | docker exec -i $1 tar -x -C $2
diff --git a/taskcluster/scripts/misc/build-binutils-linux.sh b/taskcluster/scripts/misc/build-binutils-linux.sh
new file mode 100755
index 000000000..da0eb2724
--- /dev/null
+++ b/taskcluster/scripts/misc/build-binutils-linux.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building binutils for Linux.
+
+WORKSPACE=$HOME/workspace
+HOME_DIR=$WORKSPACE/build
+UPLOAD_DIR=$WORKSPACE/artifacts
+
+cd $HOME_DIR/src
+
+build/unix/build-binutils/build-binutils.sh $HOME_DIR
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp $HOME_DIR/binutils.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-cctools.sh b/taskcluster/scripts/misc/build-cctools.sh
new file mode 100755
index 000000000..3eea0929d
--- /dev/null
+++ b/taskcluster/scripts/misc/build-cctools.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building cctools (Apple's binutils) for Linux using
+# crosstool-ng (https://github.com/diorcety/crosstool-ng).
+
+WORKSPACE=$HOME/workspace
+UPLOAD_DIR=$WORKSPACE/artifacts
+
+# Repository info
+: CROSSTOOL_NG_REPOSITORY ${CROSSTOOL_NG_REPOSITORY:=https://github.com/diorcety/crosstool-ng}
+: CROSSTOOL_NG_REV ${CROSSTOOL_NG_REV:=master}
+
+# hacky
+ln -s `which gcc` ~/bin/x86_64-linux-gnu-gcc
+export PATH=$PATH:~/bin
+
+# Set some crosstools-ng directories
+CT_TOP_DIR=$WORKSPACE/crosstool-ng-build
+CT_PREFIX_DIR=$WORKSPACE/cctools
+CT_SRC_DIR=$CT_TOP_DIR/src
+CT_TARBALLS_DIR=$CT_TOP_DIR
+CT_WORK_DIR=$CT_SRC_DIR
+CT_LIB_DIR=$WORKSPACE/crosstool-ng
+CT_BUILD_DIR=$CT_TOP_DIR/build
+CT_LLVM_DIR=$WORKSPACE/clang
+CT_BUILDTOOLS_PREFIX_DIR=$CT_PREFIX_DIR
+
+# Create our directories
+rm -rf $CT_TOP_DIR
+mkdir $CT_TOP_DIR
+rm -rf $CT_PREFIX_DIR
+mkdir $CT_PREFIX_DIR
+mkdir -p $CT_SRC_DIR
+
+# Clone the crosstool-ng repo
+tc-vcs checkout $CT_LIB_DIR $CROSSTOOL_NG_REPOSITORY $CROSSTOOL_NG_REPOSITORY $CROSSTOOL_NG_REV
+
+# Fetch clang from tooltool
+cd $WORKSPACE
+wget -O tooltool.py https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py
+chmod +x tooltool.py
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/home/worker/tooltool-cache}
+export TOOLTOOL_CACHE
+
+wget ${GECKO_HEAD_REPOSITORY}/raw-file/${GECKO_HEAD_REV}/browser/config/tooltool-manifests/linux64/clang.manifest
+
+python tooltool.py -v --manifest=clang.manifest fetch
+
+# Copy clang into the crosstools-ng srcdir
+cp -Rp $CT_LLVM_DIR $CT_SRC_DIR
+
+# Configure crosstools-ng
+sed=sed
+CT_CONNECT_TIMEOUT=5
+CT_BINUTILS_VERSION=809
+CT_PATCH_ORDER=bundled
+CT_BUILD=x86_64-linux-gnu
+CT_HOST=x86_64-linux-gnu
+CT_TARGET=x86_64-apple-darwin10
+CT_LLVM_FULLNAME=clang
+
+cd $CT_TOP_DIR
+
+# gets a bit too verbose here
+set +x
+
+. $CT_LIB_DIR/scripts/functions
+. $CT_LIB_DIR/scripts/build/binutils/cctools.sh
+
+# Build cctools
+do_binutils_get
+do_binutils_extract
+do_binutils_for_host
+
+set -x
+
+strip $CT_PREFIX_DIR/bin/*
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+tar czf $UPLOAD_DIR/cctools.tar.gz -C $WORKSPACE `basename $CT_PREFIX_DIR`
diff --git a/taskcluster/scripts/misc/build-clang-linux.sh b/taskcluster/scripts/misc/build-clang-linux.sh
new file mode 100755
index 000000000..e1c6f2f0d
--- /dev/null
+++ b/taskcluster/scripts/misc/build-clang-linux.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -x -e -v
+
+# This script is for building clang for Linux.
+
+WORKSPACE=$HOME/workspace
+HOME_DIR=$WORKSPACE/build
+UPLOAD_DIR=$WORKSPACE/artifacts
+
+# Fetch our toolchain from tooltool
+cd $HOME_DIR
+wget -O tooltool.py https://raw.githubusercontent.com/mozilla/build-tooltool/master/tooltool.py
+chmod +x tooltool.py
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/home/worker/tooltool-cache}
+export TOOLTOOL_CACHE
+cd src
+$HOME_DIR/tooltool.py -m browser/config/tooltool-manifests/linux64/releng.manifest fetch
+
+# gets a bit too verbose here
+set +x
+
+cd build/build-clang
+# |mach python| sets up a virtualenv for us!
+../../mach python ./build-clang.py -c clang-static-analysis-linux64.json
+
+set -x
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp clang.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/build-clang-windows.sh b/taskcluster/scripts/misc/build-clang-windows.sh
new file mode 100755
index 000000000..6d2acaa03
--- /dev/null
+++ b/taskcluster/scripts/misc/build-clang-windows.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+set -x -e -v
+
+# This script is for building clang-cl on Windows.
+
+# Fetch our toolchain from tooltool.
+wget -O tooltool.py ${TOOLTOOL_REPO}/raw/${TOOLTOOL_REV}/tooltool.py
+chmod +x tooltool.py
+: TOOLTOOL_CACHE ${TOOLTOOL_CACHE:=/home/worker/tooltool-cache}
+export TOOLTOOL_CACHE
+
+TOOLTOOL_AUTH_FILE=/c/builds/relengapi.tok
+if [ ! -e ${TOOLTOOL_AUTH_FILE} ]; then
+ echo cannot find ${TOOLTOOL_AUTH_FILE}
+ exit 1
+fi
+
+TOOLTOOL_MANIFEST=build/src/browser/config/tooltool-manifests/win32/build-clang-cl.manifest
+./tooltool.py --authentication-file="${TOOLTOOL_AUTH_FILE}" -m "${TOOLTOOL_MANIFEST}" fetch
+
+# Set up all the Visual Studio paths.
+MSVC_DIR=vs2015u3
+VSWINPATH="$(cd ${MSVC_DIR} && pwd)"
+
+echo vswinpath ${VSWINPATH}
+
+export WINDOWSSDKDIR="${VSWINPATH}/SDK"
+export WIN32_REDIST_DIR="${VSWINPATH}/VC/redist/x86/Microsoft.VC140.CRT"
+export WIN_UCRT_REDIST_DIR="${VSWINPATH}/SDK/Redist/ucrt/DLLs/x86"
+
+export PATH="${VSWINPATH}/VC/bin/amd64_x86:${VSWINPATH}/VC/bin/amd64:${VSWINPATH}/VC/bin:${VSWINPATH}/SDK/bin/x86:${VSWINPATH}/SDK/bin/x64:${VSWINPATH}/DIA SDK/bin:${PATH}"
+export PATH="${VSWINPATH}/VC/redist/x86/Microsoft.VC140.CRT:${VSWINPATH}/VC/redist/x64/Microsoft.VC140.CRT:${VSWINPATH}/SDK/Redist/ucrt/DLLs/x86:${VSWINPATH}/SDK/Redist/ucrt/DLLs/x64:${PATH}"
+
+export INCLUDE="${VSWINPATH}/VC/include:${VSWINPATH}/VC/atlmfc/include:${VSWINPATH}/SDK/Include/10.0.14393.0/ucrt:${VSWINPATH}/SDK/Include/10.0.14393.0/shared:${VSWINPATH}/SDK/Include/10.0.14393.0/um:${VSWINPATH}/SDK/Include/10.0.14393.0/winrt:${VSWINPATH}/DIA SDK/include"
+export LIB="${VSWINPATH}/VC/lib:${VSWINPATH}/VC/atlmfc/lib:${VSWINPATH}/SDK/lib/10.0.14393.0/ucrt/x86:${VSWINPATH}/SDK/lib/10.0.14393.0/um/x86:${VSWINPATH}/DIA SDK/lib"
+
+export PATH="$(cd svn && pwd)/bin:${PATH}"
+export PATH="$(cd cmake && pwd)/bin:${PATH}"
+export PATH="$(cd ninja && pwd)/bin:${PATH}"
+
+# We use |mach python| to set up a virtualenv automatically for us. We create
+# a dummy mozconfig, because the default machinery for config.guess-choosing
+# of the objdir doesn't work very well.
+MOZCONFIG="$(pwd)/mozconfig"
+cat > ${MOZCONFIG} <<EOF
+mk_add_options MOZ_OBJDIR=$(pwd)/objdir
+EOF
+
+# gets a bit too verbose here
+set +x
+
+BUILD_CLANG_DIR=build/src/build/build-clang
+MOZCONFIG=${MOZCONFIG} build/src/mach python ${BUILD_CLANG_DIR}/build-clang.py -c ${BUILD_CLANG_DIR}/clang-static-analysis-win32.json
+
+set -x
+
+# Put a tarball in the artifacts dir
+UPLOAD_PATH=public/build
+mkdir -p ${UPLOAD_PATH}
+cp clang.tar.* ${UPLOAD_PATH}
diff --git a/taskcluster/scripts/misc/build-gcc-linux.sh b/taskcluster/scripts/misc/build-gcc-linux.sh
new file mode 100755
index 000000000..7621ec4aa
--- /dev/null
+++ b/taskcluster/scripts/misc/build-gcc-linux.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e
+
+# This script is for building GCC for Linux.
+
+WORKSPACE=$HOME/workspace
+HOME_DIR=$WORKSPACE/build
+UPLOAD_DIR=$WORKSPACE/artifacts
+
+cd $HOME_DIR/src
+
+build/unix/build-gcc/build-gcc.sh $HOME_DIR
+
+# Put a tarball in the artifacts dir
+mkdir -p $UPLOAD_DIR
+cp $HOME_DIR/gcc.tar.* $UPLOAD_DIR
diff --git a/taskcluster/scripts/misc/minidump_stackwalk.sh b/taskcluster/scripts/misc/minidump_stackwalk.sh
new file mode 100755
index 000000000..de4fd748c
--- /dev/null
+++ b/taskcluster/scripts/misc/minidump_stackwalk.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+#
+# This script builds minidump_stackwalk binaries from the Google Breakpad
+# source for all of the operating systems that we run Firefox tests on:
+# Linux x86, Linux x86-64, Windows x86, OS X x86-64.
+#
+# It expects to be run in the luser/breakpad-builder:0.7 Docker image and
+# needs access to the relengapiproxy to download internal tooltool files.
+
+set -v -e -x
+
+# This is a pain to support properly with gclient.
+#: BREAKPAD_REPO ${BREAKPAD_REPO:=https://google-breakpad.googlecode.com/svn/trunk/}
+: BREAKPAD_REV "${BREAKPAD_REV:=master}"
+: STACKWALK_HTTP_REPO "${STACKWALK_HTTP_REPO:=https://hg.mozilla.org/users/tmielczarek_mozilla.com/stackwalk-http}"
+: STACKWALK_HTTP_REV "${STACKWALK_HTTP_REV:=default}"
+
+ncpu=$(getconf _NPROCESSORS_ONLN)
+
+function build()
+{
+ cd /tmp
+ local platform=$1
+ local strip_prefix=$2
+ local configure_args=$3
+ local make_args=$4
+ local objdir=/tmp/obj-breakpad-$platform
+ local ext=
+ if test "$platform" = "win32"; then
+ ext=.exe
+ fi
+ rm -rf "$objdir"
+ mkdir "$objdir"
+ # First, build Breakpad
+ cd "$objdir"
+ # shellcheck disable=SC2086
+ CFLAGS="-O2 $CFLAGS" CXXFLAGS="-O2 $CXXFLAGS" /tmp/breakpad/src/configure --disable-tools $configure_args
+ # shellcheck disable=SC2086
+ make -j$ncpu $make_args src/libbreakpad.a src/third_party/libdisasm/libdisasm.a src/processor/stackwalk_common.o
+ # Second, build stackwalk-http
+ make -f /tmp/stackwalk-http/Makefile BREAKPAD_SRCDIR=/tmp/breakpad/src "BREAKPAD_OBJDIR=$(pwd)" "OS=$platform" "-j$ncpu"
+ "${strip_prefix}strip" "stackwalk${ext}"
+ cp "stackwalk${ext}" "/tmp/stackwalker/${platform}-minidump_stackwalk${ext}"
+}
+
+function linux64()
+{
+ export LDFLAGS="-static-libgcc -static-libstdc++"
+ build linux64
+ unset LDFLAGS
+}
+
+function linux32()
+{
+ export LDFLAGS="-static-libgcc -static-libstdc++ -L/tmp/libcurl-i386/lib"
+ export CFLAGS="-m32 -I/tmp/libcurl-i386/include"
+ export CXXFLAGS="-m32 -I/tmp/libcurl-i386/include"
+ build linux32 "" "--enable-m32"
+ unset LDFLAGS CFLAGS CXXFLAGS
+}
+
+function macosx64()
+{
+ cd /tmp
+ if ! test -d MacOSX10.7.sdk; then
+ python tooltool.py -v --manifest=macosx-sdk.manifest --url=http://relengapi/tooltool/ fetch
+ fi
+ export MACOSX_SDK=/tmp/MacOSX10.7.sdk
+ export CCTOOLS=/tmp/cctools
+ local FLAGS="-stdlib=libc++ -target x86_64-apple-darwin10 -mlinker-version=136 -B /tmp/cctools/bin -isysroot ${MACOSX_SDK} -mmacosx-version-min=10.7"
+ export CC="clang $FLAGS"
+ export CXX="clang++ $FLAGS -std=c++11"
+ local old_path="$PATH"
+ export PATH="/tmp/clang/bin:/tmp/cctools/bin/:$PATH"
+ export LD_LIBRARY_PATH=/usr/lib/llvm-3.6/lib/
+
+ build macosx64 "/tmp/cctools/bin/x86_64-apple-darwin10-" "--host=x86_64-apple-darwin10" "AR=/tmp/cctools/bin/x86_64-apple-darwin10-ar"
+
+ unset CC CXX LD_LIBRARY_PATH MACOSX_SDK CCTOOLS
+ export PATH="$old_path"
+}
+
+function win32()
+{
+ export LDFLAGS="-static-libgcc -static-libstdc++"
+ export CFLAGS="-D__USE_MINGW_ANSI_STDIO=1"
+ export CXXFLAGS="-D__USE_MINGW_ANSI_STDIO=1"
+ export ZLIB_DIR=/tmp/zlib-mingw
+ build win32 "i686-w64-mingw32-" "--host=i686-w64-mingw32"
+ unset LDFLAGS CFLAGS CXXFLAGS ZLIB_DIR
+}
+
+cd /tmp
+if ! test -d depot_tools; then
+ git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+else
+ (cd depot_tools; git pull origin master)
+fi
+export PATH=$(pwd)/depot_tools:"$PATH"
+if ! test -d breakpad; then
+ mkdir breakpad
+ pushd breakpad
+ fetch breakpad
+ popd
+else
+ pushd breakpad/src
+ git pull origin master
+ popd
+fi
+pushd breakpad/src
+git checkout "${BREAKPAD_REV}"
+gclient sync
+popd
+
+(cd breakpad/src; git rev-parse master)
+if ! test -d stackwalk-http; then
+ hg clone -u "$STACKWALK_HTTP_REV" "$STACKWALK_HTTP_REPO"
+else
+ (cd stackwalk-http && hg pull "$STACKWALK_HTTP_REPO" && hg up "$STACKWALK_HTTP_REV")
+fi
+mkdir -p stackwalker
+linux64
+linux32
+macosx64
+win32
diff --git a/taskcluster/scripts/misc/repackage-jdk-centos.sh b/taskcluster/scripts/misc/repackage-jdk-centos.sh
new file mode 100755
index 000000000..2c952602b
--- /dev/null
+++ b/taskcluster/scripts/misc/repackage-jdk-centos.sh
@@ -0,0 +1,45 @@
+#! /bin/bash
+
+set -e -x
+
+mkdir -p artifacts
+pushd build
+
+rm -rf root && mkdir root && cd root
+
+# change these variables when updating java version
+mirror_url_base="http://mirror.centos.org/centos/6.7/updates/x86_64/Packages"
+openjdk=java-1.7.0-openjdk-1.7.0.85-2.6.1.3.el6_7.x86_64.rpm
+openjdk_devel=java-1.7.0-openjdk-devel-1.7.0.85-2.6.1.3.el6_7.x86_64.rpm
+jvm_openjdk_dir=java-1.7.0-openjdk-1.7.0.85.x86_64
+
+# grab the rpm and unpack it
+wget ${mirror_url_base}/${openjdk}
+wget ${mirror_url_base}/${openjdk_devel}
+rpm2cpio $openjdk | cpio -ivd
+rpm2cpio $openjdk_devel | cpio -ivd
+
+cd usr/lib/jvm
+mv $jvm_openjdk_dir java_home
+
+# cacerts is a relative symlink, which doesn't work when we repackage. Make it
+# absolute. We could use tar's --dereference option, but there's a subtle
+# difference between making the symlink absolute and using --dereference.
+# Making the symlink absolute lets the consuming system set the cacerts; using
+# --dereference takes the producing system's cacerts and sets them in stone. We
+# prefer the flexibility of the former.
+rm java_home/jre/lib/security/cacerts
+ln -s /etc/pki/java/cacerts java_home/jre/lib/security/cacerts
+
+# document version this is based on
+echo "Built from ${mirror_url_base}
+ ${openjdk}
+ ${openjdk_devel}
+
+Run through rpm2cpio | cpio, and /usr/lib/jvm/${jvm_openjdk_dir} renamed to 'java_home'." > java_home/VERSION
+
+# tarball the unpacked rpm and put it in the taskcluster upload artifacts dir
+tar -Jvcf java_home-${jvm_openjdk_dir}.tar.xz java_home
+popd
+
+mv build/root/usr/lib/jvm/java_home-${jvm_openjdk_dir}.tar.xz artifacts
diff --git a/taskcluster/scripts/tester/harness-test-linux.sh b/taskcluster/scripts/tester/harness-test-linux.sh
new file mode 100644
index 000000000..b38ffa124
--- /dev/null
+++ b/taskcluster/scripts/tester/harness-test-linux.sh
@@ -0,0 +1,40 @@
+#! /bin/bash -vex
+
+set -x -e
+
+echo "running as" $(id)
+
+####
+# Taskcluster friendly wrapper for running a script in
+# testing/mozharness/scripts in a source checkout (no build).
+# Example use: Python-only harness unit tests
+####
+
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+: SRC_ROOT ${SRC_ROOT:=$WORKSPACE/build/src}
+# These paths should be relative to $SRC_ROOT
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: mozharness args "${@}"
+
+set -v
+cd $WORKSPACE
+
+fail() {
+ echo # make sure error message is on a new line
+ echo "[harness-test-linux.sh:error]" "${@}"
+ exit 1
+}
+
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config-file ${SRC_ROOT}/${cfg}"
+done
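+# e.g. MOZHARNESS_CONFIG="cfg/a.py cfg/b.py" (illustrative names) expands to:
+#   --config-file $SRC_ROOT/cfg/a.py --config-file $SRC_ROOT/cfg/b.py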
+
+python2.7 $SRC_ROOT/${MOZHARNESS_SCRIPT} ${config_cmds} "${@}"
diff --git a/taskcluster/scripts/tester/run-wizard b/taskcluster/scripts/tester/run-wizard
new file mode 100755
index 000000000..5dafb0b62
--- /dev/null
+++ b/taskcluster/scripts/tester/run-wizard
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import print_function, unicode_literals
+
+import datetime
+import os
+import subprocess
+import sys
+import time
+from textwrap import wrap
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+def call(cmd, **kwargs):
+ print(" ".join(cmd))
+ return subprocess.call(cmd, **kwargs)
+
+
+def wait_for_run_mozharness(timeout=30):
+ starttime = datetime.datetime.now()
+ while datetime.datetime.now() - starttime < datetime.timedelta(seconds=timeout):
+ if os.path.isfile(os.path.join(here, 'run-mozharness')):
+ break
+ time.sleep(0.2)
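+    # NB: this `else` belongs to the `while` loop; it runs only when the
+    # loop finishes without `break`, i.e. when we time out.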
+ else:
+ print("Timed out after %d seconds waiting for the 'run-mozharness' binary" % timeout)
+ return 1
+
+
+def resume():
+ wait_for_run_mozharness()
+ call(['run-mozharness'])
+
+
+def setup():
+ """Run the mozharness script without the 'run-tests' action.
+
+ This will do all the necessary setup steps like creating a virtualenv and
+ downloading the tests and firefox binary. But it stops before running the
+ tests.
+ """
+ wait_for_run_mozharness()
+ status = call(['run-mozharness', '--no-run-tests'])
+
+ if status:
+ # something went wrong
+ return status
+
+ build_dir = os.path.expanduser(os.path.join('~', 'workspace', 'build'))
+ mach_src = os.path.join(build_dir, 'tests', 'mach')
+ mach_dest = os.path.expanduser(os.path.join('~', 'bin', 'mach'))
+
+ if os.path.exists(mach_dest):
+ os.remove(mach_dest)
+ os.symlink(mach_src, mach_dest)
+
+ activate = os.path.join(build_dir, 'venv', 'bin', 'activate')
+ if os.path.isfile(activate):
+ # TODO Support other shells
+ bashrc = os.path.expanduser(os.path.join('~', '.bashrc'))
+ with open(bashrc, 'ab') as f:
+            f.write(". {}\n".format(activate))
+
+ print("""
+Mozharness has finished downloading the build and tests to:
+{}
+
+A limited mach environment has also been set up and added to the $PATH, but
+it may be missing the command you need. To see a list of commands, run:
+ $ mach help
+""".lstrip().format(build_dir))
+
+
+def clone():
+ """Clone the correct gecko repository and update to the proper revision."""
+ base_repo = os.environ['GECKO_HEAD_REPOSITORY']
+ dest = os.path.expanduser(os.path.join('~', 'gecko'))
+
+ # Specify method to checkout a revision. This defaults to revisions as
+ # SHA-1 strings, but also supports symbolic revisions like `tip` via the
+ # branch flag.
+ if os.environ.get('GECKO_HEAD_REV'):
+ revision_flag = b'--revision'
+ revision = os.environ['GECKO_HEAD_REV']
+ elif os.environ.get('GECKO_HEAD_REF'):
+ revision_flag = b'--branch'
+ revision = os.environ['GECKO_HEAD_REF']
+ else:
+ print('revision is not specified for checkout')
+ return 1
+
+ # TODO Bug 1301382 - pin hg.mozilla.org fingerprint.
+ call([
+ b'/usr/bin/hg', b'robustcheckout',
+ b'--sharebase', os.environ['HG_STORE_PATH'],
+ b'--purge',
+ b'--upstream', b'https://hg.mozilla.org/mozilla-unified',
+ revision_flag, revision,
+ base_repo, dest
+ ])
+ print("Finished cloning to {} at revision {}.".format(dest, revision))
+
+
+def exit():
+ pass
+
+
+OPTIONS = [
+ ('Resume task', resume,
+ "Resume the original task without modification. This can be useful for "
+ "passively monitoring it from another shell."),
+ ('Setup task', setup,
+ "Setup the task (download the application and tests) but don't run the "
+ "tests just yet. The tests can be run with a custom configuration later. "
+ "This will provide a mach environment (experimental)."),
+ ('Clone gecko', clone,
+ "Perform a clone of gecko using the task's repo and update it to the "
+ "task's revision."),
+ ('Exit', exit, "Exit this wizard and return to the shell.")
+]
+
+
+def _fmt_options():
+ max_line_len = 60
+ max_name_len = max(len(o[0]) for o in OPTIONS)
+
+ # TODO Pad will be off if there are more than 9 options.
+ pad = ' ' * (max_name_len+6)
+
+ msg = []
+ for i, (name, _, desc) in enumerate(OPTIONS):
+ desc = wrap(desc, width=max_line_len)
+ desc = [desc[0]] + [pad + l for l in desc[1:]]
+
+ optstr = '{}) {} - {}\n'.format(
+ i+1, name.ljust(max_name_len), '\n'.join(desc))
+ msg.append(optstr)
+ msg.append("Select one of the above options: ")
+ return '\n'.join(msg)
+
+
+def wizard():
+ print("This wizard can help you get started with some common debugging "
+ "workflows.\nWhat would you like to do?\n")
+ print(_fmt_options(), end="")
+ choice = None
+ while True:
+ choice = raw_input().decode('utf8')
+ try:
+ choice = int(choice)-1
+ if 0 <= choice < len(OPTIONS):
+ break
+ except ValueError:
+ pass
+
+ print("Must provide an integer from 1-{}:".format(len(OPTIONS)))
+
+ func = OPTIONS[choice][1]
+ ret = func()
+
+ print("Use the 'run-wizard' command to start this wizard again.")
+ return ret
+
+
+if __name__ == '__main__':
+ sys.exit(wizard())
diff --git a/taskcluster/scripts/tester/test-b2g.sh b/taskcluster/scripts/tester/test-b2g.sh
new file mode 100644
index 000000000..43a54f93a
--- /dev/null
+++ b/taskcluster/scripts/tester/test-b2g.sh
@@ -0,0 +1,118 @@
+#! /bin/bash -xe
+
+set -x -e
+
+echo "running as" $(id)
+
+####
+# Taskcluster friendly wrapper for performing fx desktop tests via mozharness.
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_URL ${MOZHARNESS_URL}
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: NEED_XVFB ${NEED_XVFB:=true}
+: NEED_PULSEAUDIO ${NEED_PULSEAUDIO:=false}
+: NEED_PULL_GAIA ${NEED_PULL_GAIA:=false}
+: SKIP_MOZHARNESS_RUN ${SKIP_MOZHARNESS_RUN:=false}
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+: mozharness args "${@}"
+
+set -v
+cd $WORKSPACE
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_URL} ]]; then exit 1; fi
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then exit 1; fi
+if [[ -z ${MOZHARNESS_CONFIG} ]]; then exit 1; fi
+
+mkdir -p ~/artifacts/public
+
+cleanup() {
+ if [ -n "$xvfb_pid" ]; then
+ kill $xvfb_pid || true
+ fi
+}
+trap cleanup EXIT INT
+
+# Unzip the mozharness ZIP file created by the build task
+curl --fail -o mozharness.zip --retry 10 -L $MOZHARNESS_URL
+rm -rf mozharness
+unzip -q mozharness.zip
+rm mozharness.zip
+
+if ! [ -d mozharness ]; then
+ echo "mozharness zip did not contain mozharness/"
+ exit 1
+fi
+
+# start up the pulseaudio daemon. Note that it's important this occur
+# before the Xvfb startup.
+if $NEED_PULSEAUDIO; then
+ pulseaudio --fail --daemonize --start
+ pactl load-module module-null-sink
+fi
+
+# run Xvfb in the background, if necessary
+if $NEED_XVFB; then
+ Xvfb :0 -nolisten tcp -screen 0 1600x1200x24 \
+ > ~/artifacts/public/xvfb.log 2>&1 &
+ export DISPLAY=:0
+ xvfb_pid=$!
+ # Only error code 255 matters, because it signifies that no
+ # display could be opened. As long as we can open the display
+ # tests should work. We'll retry a few times with a sleep before
+ # failing.
+ retry_count=0
+ max_retries=2
+ xvfb_test=0
+ until [ $retry_count -gt $max_retries ]; do
+ xvinfo || xvfb_test=$?
+ if [ $xvfb_test != 255 ]; then
+ retry_count=$(($max_retries + 1))
+ else
+ retry_count=$(($retry_count + 1))
+ echo "Failed to start Xvfb, retry: $retry_count"
+ sleep 2
+        fi
+    done
+ if [ $xvfb_test == 255 ]; then exit 255; fi
+fi
+
+gaia_cmds=""
+if $NEED_PULL_GAIA; then
+ # test required parameters are supplied
+ if [[ -z ${GAIA_BASE_REPOSITORY} ]]; then exit 1; fi
+ if [[ -z ${GAIA_HEAD_REPOSITORY} ]]; then exit 1; fi
+ if [[ -z ${GAIA_REV} ]]; then exit 1; fi
+ if [[ -z ${GAIA_REF} ]]; then exit 1; fi
+
+ tc-vcs checkout \
+ ${WORKSPACE}/gaia \
+ ${GAIA_BASE_REPOSITORY} \
+ ${GAIA_HEAD_REPOSITORY} \
+ ${GAIA_REV} \
+ ${GAIA_REF}
+
+ gaia_cmds="--gaia-dir=${WORKSPACE}"
+fi
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config-file ${cfg}"
+done
+
+if [ ${SKIP_MOZHARNESS_RUN} == true ]; then
+    # Skipping mozharness allows the developer to start the window manager
+    # properly and to change how mozharness is executed without exiting the
+    # container.
+ echo "We skipped running Mozharness."
+ echo "Make sure you export DISPLAY=:0 before calling Mozharness."
+ echo "Don't forget to call it with 'sudo -E -u worker'."
+else
+ # run the given mozharness script and configs, but pass the rest of the
+ # arguments in from our own invocation
+ python2.7 $WORKSPACE/${MOZHARNESS_SCRIPT} ${config_cmds} ${gaia_cmds} "${@}"
+fi
diff --git a/taskcluster/scripts/tester/test-macosx.sh b/taskcluster/scripts/tester/test-macosx.sh
new file mode 100644
index 000000000..8c2b758fb
--- /dev/null
+++ b/taskcluster/scripts/tester/test-macosx.sh
@@ -0,0 +1,77 @@
+#! /bin/bash -xe
+
+set -x -e
+
+echo "running as" $(id)
+
+####
+# Taskcluster friendly wrapper for performing fx Mac OSX tests via mozharness.
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_URL ${MOZHARNESS_URL}
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+
+WORKSPACE=$HOME
+cd $WORKSPACE
+
+# helper used below for required-parameter checks
+fail() { echo; echo "[test-macosx.sh:error]" "${@}"; exit 1; }
+
+rm -rf artifacts
+mkdir artifacts
+
+# test required parameters are supplied
+if [[ -z ${MOZHARNESS_URL} ]]; then fail "MOZHARNESS_URL is not set"; fi
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
+
+# Download mozharness with exponential backoff
+# curl already applies exponential backoff, but not for all
+# failed cases, apparently, as we keep getting failed downloads
+# with 404 code.
+download_mozharness() {
+ local max_attempts=10
+ local timeout=1
+ local attempt=0
+
+ echo "Downloading mozharness"
+
+    while [[ $attempt -lt $max_attempts ]]; do  # -lt: numeric comparison; `<` would compare strings
+ if curl --fail -o mozharness.zip --retry 10 -L $MOZHARNESS_URL; then
+ rm -rf mozharness
+ if unzip -q mozharness.zip; then
+ return 0
+ fi
+ echo "error unzipping mozharness.zip" >&2
+ else
+ echo "failed to download mozharness zip" >&2
+ fi
+ echo "Download failed, retrying in $timeout seconds..." >&2
+ sleep $timeout
+ timeout=$((timeout*2))
+ attempt=$((attempt+1))
+ done
+
+ fail "Failed to download and unzip mozharness"
+}
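+
+# With max_attempts=10 and the timeout doubling from 1 second, the retry
+# delays are 1, 2, 4, ..., 512 seconds (about 17 minutes in the worst case).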
+
+download_mozharness
+rm mozharness.zip
+
+# For telemetry purposes, the build process wants information about the
+# source it is running; tc-vcs obscures this a little, but we can provide
+# it directly.
+export MOZ_SOURCE_REPO="${GECKO_HEAD_REPOSITORY}"
+export MOZ_SOURCE_CHANGESET="${GECKO_HEAD_REV}"
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config-file ${cfg}"
+done
+
+rm -rf build logs properties target.dmg
+
+# run the given mozharness script and configs, but pass the rest of the
+# arguments in from our own invocation
+python2.7 $WORKSPACE/mozharness/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} "${@}"
diff --git a/taskcluster/scripts/tester/test-ubuntu.sh b/taskcluster/scripts/tester/test-ubuntu.sh
new file mode 100644
index 000000000..0c2ccc702
--- /dev/null
+++ b/taskcluster/scripts/tester/test-ubuntu.sh
@@ -0,0 +1,188 @@
+#! /bin/bash -xe
+
+set -x -e
+
+echo "running as" $(id)
+
+# Detect release version.
+. /etc/lsb-release
+if [ "${DISTRIB_RELEASE}" == "12.04" ]; then
+ UBUNTU_1204=1
+elif [ "${DISTRIB_RELEASE}" == "16.04" ]; then
+ UBUNTU_1604=1
+fi
+
+. /home/worker/scripts/xvfb.sh
+
+####
+# Taskcluster friendly wrapper for performing fx desktop tests via mozharness.
+####
+
+# Inputs, with defaults
+
+: MOZHARNESS_PATH ${MOZHARNESS_PATH}
+: MOZHARNESS_URL ${MOZHARNESS_URL}
+: MOZHARNESS_SCRIPT ${MOZHARNESS_SCRIPT}
+: MOZHARNESS_CONFIG ${MOZHARNESS_CONFIG}
+: NEED_XVFB ${NEED_XVFB:=true}
+: NEED_WINDOW_MANAGER ${NEED_WINDOW_MANAGER:=false}
+: NEED_PULSEAUDIO ${NEED_PULSEAUDIO:=false}
+: START_VNC ${START_VNC:=false}
+: TASKCLUSTER_INTERACTIVE ${TASKCLUSTER_INTERACTIVE:=false}
+: WORKSPACE ${WORKSPACE:=/home/worker/workspace}
+: mozharness args "${@}"
+
+set -v
+cd $WORKSPACE
+
+fail() {
+ echo # make sure error message is on a new line
+    echo "[test-ubuntu.sh:error]" "${@}"
+ exit 1
+}
+
+maybe_start_pulse() {
+ if $NEED_PULSEAUDIO; then
+ pulseaudio --fail --daemonize --start
+ pactl load-module module-null-sink
+ fi
+}
+
+# test required parameters are supplied
+if [ -z "${MOZHARNESS_PATH}" -a -z "${MOZHARNESS_URL}" ]; then
+ fail "MOZHARNESS_PATH or MOZHARNESS_URL must be defined";
+fi
+
+if [[ -z ${MOZHARNESS_SCRIPT} ]]; then fail "MOZHARNESS_SCRIPT is not set"; fi
+if [[ -z ${MOZHARNESS_CONFIG} ]]; then fail "MOZHARNESS_CONFIG is not set"; fi
+
+mkdir -p ~/artifacts/public
+
+cleanup() {
+ local rv=$?
+ if [[ -s /home/worker/.xsession-errors ]]; then
+ # To share X issues
+ cp /home/worker/.xsession-errors ~/artifacts/public/xsession-errors.log
+ fi
+ cleanup_xvfb
+ exit $rv
+}
+trap cleanup EXIT INT
+
+# Download mozharness with exponential backoff
+# curl already applies exponential backoff, but not for all
+# failed cases, apparently, as we keep getting failed downloads
+# with 404 code.
+download_mozharness() {
+ local max_attempts=10
+ local timeout=1
+ local attempt=0
+
+ echo "Downloading mozharness"
+
+    while [[ $attempt -lt $max_attempts ]]; do  # -lt: numeric comparison; `<` would compare strings
+ if curl --fail -o mozharness.zip --retry 10 -L $MOZHARNESS_URL; then
+ rm -rf mozharness
+ if unzip -q mozharness.zip; then
+ return 0
+ fi
+ echo "error unzipping mozharness.zip" >&2
+ else
+ echo "failed to download mozharness zip" >&2
+ fi
+ echo "Download failed, retrying in $timeout seconds..." >&2
+ sleep $timeout
+ timeout=$((timeout*2))
+ attempt=$((attempt+1))
+ done
+
+ fail "Failed to download and unzip mozharness"
+}
+
+# Download mozharness if we're told to.
+if [ -n "${MOZHARNESS_URL}" ]; then
+ download_mozharness
+ rm mozharness.zip
+
+ if ! [ -d mozharness ]; then
+ fail "mozharness zip did not contain mozharness/"
+ fi
+
+ MOZHARNESS_PATH=`pwd`/mozharness
+fi
+
+# pulseaudio daemon must be started before xvfb on Ubuntu 12.04.
+if [ "${UBUNTU_1204}" ]; then
+ maybe_start_pulse
+fi
+
+# run Xvfb in the background, if necessary
+if $NEED_XVFB; then
+ start_xvfb '1600x1200x24' 0
+fi
+
+if $START_VNC; then
+ x11vnc > ~/artifacts/public/x11vnc.log 2>&1 &
+fi
+
+if $NEED_WINDOW_MANAGER; then
+ # This is read by xsession to select the window manager
+ echo DESKTOP_SESSION=ubuntu > /home/worker/.xsessionrc
+
+ # note that doing anything with this display before running Xsession will cause sadness (like,
+ # crashes in compiz). Make sure that X has enough time to start
+ sleep 15
+ # DISPLAY has already been set above
+ # XXX: it would be ideal to add a semaphore logic to make sure that the
+ # window manager is ready
+ /etc/X11/Xsession 2>&1 &
+
+ # Turn off the screen saver and screen locking
+ gsettings set org.gnome.desktop.screensaver idle-activation-enabled false
+ gsettings set org.gnome.desktop.screensaver lock-enabled false
+ gsettings set org.gnome.desktop.screensaver lock-delay 3600
+ # Disable the screen saver
+ xset s off s reset
+
+ if [ "${UBUNTU_1604}" ]; then
+ # start compiz for our window manager
+ compiz 2>&1 &
+ #TODO: how to determine if compiz starts correctly?
+ fi
+fi
+
+if [ "${UBUNTU_1604}" ]; then
+ maybe_start_pulse
+fi
+
+# For telemetry purposes, the build process wants information about the
+# source it is running; tc-vcs obscures this a little, but we can provide
+# it directly.
+export MOZ_SOURCE_REPO="${GECKO_HEAD_REPOSITORY}"
+export MOZ_SOURCE_CHANGESET="${GECKO_HEAD_REV}"
+
+# support multiple, space delimited, config files
+config_cmds=""
+for cfg in $MOZHARNESS_CONFIG; do
+ config_cmds="${config_cmds} --config-file ${MOZHARNESS_PATH}/configs/${cfg}"
+done
+
+mozharness_bin="/home/worker/bin/run-mozharness"
+
+# Save the computed mozharness command to a binary which is useful
+# for interactive mode.
+echo -e "#!/usr/bin/env bash
+# Some mozharness scripts assume base_work_dir is in
+# the current working directory, see bug 1279237
+cd $WORKSPACE
+cmd=\"python2.7 ${MOZHARNESS_PATH}/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} ${@} \${@}\"
+echo \"Running: \${cmd}\"
+exec \${cmd}" > ${mozharness_bin}
+chmod +x ${mozharness_bin}
+
+# In interactive mode, the user will be prompted with options for what to do.
+if ! $TASKCLUSTER_INTERACTIVE; then
+ # run the given mozharness script and configs, but pass the rest of the
+ # arguments in from our own invocation
+ ${mozharness_bin};
+fi
diff --git a/taskcluster/taskgraph/__init__.py b/taskcluster/taskgraph/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/__init__.py
diff --git a/taskcluster/taskgraph/action.py b/taskcluster/taskgraph/action.py
new file mode 100644
index 000000000..608fe3370
--- /dev/null
+++ b/taskcluster/taskgraph/action.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import json
+import logging
+import requests
+import yaml
+
+from .create import create_tasks
+from .decision import write_artifact
+from .optimize import optimize_task_graph
+from .taskgraph import TaskGraph
+
+logger = logging.getLogger(__name__)
+TASKCLUSTER_QUEUE_URL = "https://queue.taskcluster.net/v1/task/"
+
+
+def taskgraph_action(options):
+ """
+ Run the action task. This function implements `mach taskgraph action-task`,
+ and is responsible for
+
+    * creating a task graph of the tasks requested in the parameters, with
+      respect to a given gecko decision task, and scheduling those tasks.
+ """
+
+ decision_task_id = options['decision_id']
+ # read in the full graph for reference
+ full_task_json = get_artifact(decision_task_id, "public/full-task-graph.json")
+ decision_params = get_artifact(decision_task_id, "public/parameters.yml")
+ all_tasks, full_task_graph = TaskGraph.from_json(full_task_json)
+
+ target_tasks = set(options['task_labels'].split(','))
+ target_graph = full_task_graph.graph.transitive_closure(target_tasks)
+ target_task_graph = TaskGraph(
+ {l: all_tasks[l] for l in target_graph.nodes},
+ target_graph)
+
+ existing_tasks = get_artifact(decision_task_id, "public/label-to-taskid.json")
+
+    # We don't want to optimize the target tasks, since the user explicitly
+    # requested them; hence we put `target_tasks` under `do_not_optimize`.
+ optimized_graph, label_to_taskid = optimize_task_graph(target_task_graph=target_task_graph,
+ params=decision_params,
+ do_not_optimize=target_tasks,
+ existing_tasks=existing_tasks)
+
+ # write out the optimized task graph to describe what will actually happen,
+ # and the map of labels to taskids
+ write_artifact('task-graph.json', optimized_graph.to_json())
+ write_artifact('label-to-taskid.json', label_to_taskid)
+ # actually create the graph
+ create_tasks(optimized_graph, label_to_taskid, decision_params)
+
+
+def get_artifact(task_id, path):
+ url = TASKCLUSTER_QUEUE_URL + task_id + "/artifacts/" + path
+ resp = requests.get(url=url)
+    if path.endswith('.json'):
+        artifact = json.loads(resp.text)
+    elif path.endswith('.yml'):
+        artifact = yaml.load(resp.text)
+    else:
+        raise ValueError("don't know how to parse artifact {}".format(path))
+    return artifact
diff --git a/taskcluster/taskgraph/action.yml b/taskcluster/taskgraph/action.yml
new file mode 100644
index 000000000..c816f4d5c
--- /dev/null
+++ b/taskcluster/taskgraph/action.yml
@@ -0,0 +1,74 @@
+---
+created: '{{now}}'
+deadline: '{{#from_now}}1 day{{/from_now}}'
+expires: '{{#from_now}}14 day{{/from_now}}'
+metadata:
+ owner: mozilla-taskcluster-maintenance@mozilla.com
+ source: 'https://hg.mozilla.org/{{project}}/file/{{head_rev}}/taskcluster/taskgraph/action.yml'
+ name: "[tc] Action Task"
+  description: Helps schedule new jobs without a new push
+
+workerType: "gecko-decision"
+provisionerId: "aws-provisioner-v1"
+schedulerId: "gecko-level-{{level}}"
+
+tags:
+ createdForUser: {{owner}}
+
+scopes:
+ # Bug 1269443: cache scopes, etc. must be listed explicitly
+ - "docker-worker:cache:level-1-*"
+ - "docker-worker:cache:tooltool-cache"
+ - "secrets:get:project/taskcluster/gecko/hgfingerprint"
+ - "assume:repo:hg.mozilla.org/try:*"
+
+routes:
+ - "tc-treeherder.v2.{{project}}.{{head_rev}}.{{pushlog_id}}"
+ - "tc-treeherder-stage.v2.{{project}}.{{head_rev}}.{{pushlog_id}}"
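+  # e.g. with project=try these render to routes like
+  # "tc-treeherder.v2.try.<head_rev>.<pushlog_id>"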
+
+payload:
+ env:
+ GECKO_BASE_REPOSITORY: 'https://hg.mozilla.org/mozilla-unified'
+ GECKO_HEAD_REPOSITORY: '{{{head_repository}}}'
+ GECKO_HEAD_REF: '{{head_ref}}'
+ GECKO_HEAD_REV: '{{head_rev}}'
+ HG_STORE_PATH: /home/worker/checkouts/hg-store
+
+ cache:
+ level-{{level}}-checkouts: /home/worker/checkouts
+
+ features:
+ taskclusterProxy: true
+
+ # Note: This task is built server side without the context or tooling that
+ # exist in tree so we must hard code the version
+ image: 'taskcluster/decision:0.1.7'
+
+  # Virtually no network or other potentially risky operations happen as
+  # part of the task, aside from the initial clone. We intentionally set
+  # this to a low value, as all decision tasks should use a root repository
+  # which is cached.
+ maxRunTime: 1800
+
+ command:
+ - /home/worker/bin/run-task
+ - '--vcs-checkout=/home/worker/checkouts/gecko'
+ - '--'
+ - bash
+ - -cx
+ - >
+ cd /home/worker/checkouts/gecko &&
+ ln -s /home/worker/artifacts artifacts &&
+ ./mach --log-no-times taskgraph action-task
+ --decision-id='{{decision_task_id}}'
+ --task-label='{{task_labels}}'
+
+ artifacts:
+ 'public':
+ type: 'directory'
+ path: '/home/worker/artifacts'
+ expires: '{{#from_now}}7 days{{/from_now}}'
+
+extra:
+ treeherder:
+ symbol: A
diff --git a/taskcluster/taskgraph/create.py b/taskcluster/taskgraph/create.py
new file mode 100644
index 000000000..f577f8873
--- /dev/null
+++ b/taskcluster/taskgraph/create.py
@@ -0,0 +1,122 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import concurrent.futures as futures
+import requests
+import requests.adapters
+import json
+import os
+import logging
+
+from slugid import nice as slugid
+from taskgraph.util.time import (
+ current_json_time,
+ json_time_from_now
+)
+
+logger = logging.getLogger(__name__)
+
+# the maximum number of parallel createTask calls to make
+CONCURRENCY = 50
+
+
+def create_tasks(taskgraph, label_to_taskid, params):
+ taskid_to_label = {t: l for l, t in label_to_taskid.iteritems()}
+
+ session = requests.Session()
+
+ # Default HTTPAdapter uses 10 connections. Mount custom adapter to increase
+ # that limit. Connections are established as needed, so using a large value
+ # should not negatively impact performance.
+ http_adapter = requests.adapters.HTTPAdapter(pool_connections=CONCURRENCY,
+ pool_maxsize=CONCURRENCY)
+ session.mount('https://', http_adapter)
+ session.mount('http://', http_adapter)
+
+ decision_task_id = os.environ.get('TASK_ID')
+
+ # when running as an actual decision task, we use the decision task's
+ # taskId as the taskGroupId. The process that created the decision task
+ # helpfully placed it in this same taskGroup. If there is no $TASK_ID,
+ # fall back to a slugid
+ task_group_id = decision_task_id or slugid()
+ scheduler_id = 'gecko-level-{}'.format(params['level'])
+
+ with futures.ThreadPoolExecutor(CONCURRENCY) as e:
+ fs = {}
+
+ # We can't submit a task until its dependencies have been submitted.
+ # So our strategy is to walk the graph and submit tasks once all
+ # their dependencies have been submitted.
+ #
+ # Using visit_postorder() here isn't the most efficient: we'll
+ # block waiting for dependencies of task N to submit even though
+ # dependencies for task N+1 may be finished. If we need to optimize
+ # this further, we can build a graph of task dependencies and walk
+ # that.
+ for task_id in taskgraph.graph.visit_postorder():
+ task_def = taskgraph.tasks[task_id].task
+ attributes = taskgraph.tasks[task_id].attributes
+ # if this task has no dependencies, make it depend on this decision
+ # task so that it does not start immediately; and so that if this loop
+ # fails halfway through, none of the already-created tasks run.
+ if decision_task_id and not task_def.get('dependencies'):
+ task_def['dependencies'] = [decision_task_id]
+
+ task_def['taskGroupId'] = task_group_id
+ task_def['schedulerId'] = scheduler_id
+
+ # Wait for dependencies before submitting this.
+ deps_fs = [fs[dep] for dep in task_def.get('dependencies', [])
+ if dep in fs]
+ for f in futures.as_completed(deps_fs):
+ f.result()
+
+ fs[task_id] = e.submit(_create_task, session, task_id,
+ taskid_to_label[task_id], task_def)
+
+ # Schedule tasks as many times as task_duplicates indicates
+ for i in range(1, attributes.get('task_duplicates', 1)):
+ # We use slugid() since we want a distinct task id
+ fs[task_id] = e.submit(_create_task, session, slugid(),
+ taskid_to_label[task_id], task_def)
+
+ # Wait for all futures to complete.
+ for f in futures.as_completed(fs.values()):
+ f.result()
+
+
+def _create_task(session, task_id, label, task_def):
+ # create the task using 'http://taskcluster/queue', which is proxied to the queue service
+ # with credentials appropriate to this job.
+
+ # Resolve timestamps
+ now = current_json_time(datetime_format=True)
+ task_def = resolve_timestamps(now, task_def)
+
+ logger.debug("Creating task with taskId {} for {}".format(task_id, label))
+ res = session.put('http://taskcluster/queue/v1/task/{}'.format(task_id),
+ data=json.dumps(task_def))
+ if res.status_code != 200:
+ try:
+ logger.error(res.json()['message'])
+        except Exception:
+ logger.error(res.text)
+ res.raise_for_status()
+
+
+def resolve_timestamps(now, task_def):
+ def recurse(val):
+ if isinstance(val, list):
+ return [recurse(v) for v in val]
+ elif isinstance(val, dict):
+ if val.keys() == ['relative-datestamp']:
+ return json_time_from_now(val['relative-datestamp'], now)
+ else:
+ return {k: recurse(v) for k, v in val.iteritems()}
+ else:
+ return val
+ return recurse(task_def)
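+
+# Illustrative example (hypothetical snippet): given a definition containing
+#   {'expires': {'relative-datestamp': '14 days'}}
+# resolve_timestamps() replaces the inner dict with a concrete timestamp
+# 14 days after `now`.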
diff --git a/taskcluster/taskgraph/decision.py b/taskcluster/taskgraph/decision.py
new file mode 100644
index 000000000..d6460390f
--- /dev/null
+++ b/taskcluster/taskgraph/decision.py
@@ -0,0 +1,181 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+import json
+import logging
+
+import time
+import yaml
+
+from .generator import TaskGraphGenerator
+from .create import create_tasks
+from .parameters import Parameters
+from .target_tasks import get_method
+from .taskgraph import TaskGraph
+
+from taskgraph.util.templates import Templates
+from taskgraph.util.time import (
+ json_time_from_now,
+ current_json_time,
+)
+
+logger = logging.getLogger(__name__)
+
+ARTIFACTS_DIR = 'artifacts'
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
+
+# For each project, this gives a set of parameters specific to the project.
+# See `taskcluster/docs/parameters.rst` for information on parameters.
+PER_PROJECT_PARAMETERS = {
+ 'try': {
+ 'target_tasks_method': 'try_option_syntax',
+ # Always perform optimization. This makes it difficult to use try
+ # pushes to run a task that would otherwise be optimized, but is a
+ # compromise to avoid essentially disabling optimization in try.
+ 'optimize_target_tasks': True,
+ },
+
+ 'ash': {
+ 'target_tasks_method': 'ash_tasks',
+ 'optimize_target_tasks': True,
+ },
+
+ 'cedar': {
+ 'target_tasks_method': 'cedar_tasks',
+ 'optimize_target_tasks': True,
+ },
+
+ # the default parameters are used for projects that do not match above.
+ 'default': {
+ 'target_tasks_method': 'default',
+ 'optimize_target_tasks': True,
+ }
+}
+
+
+def taskgraph_decision(options):
+ """
+ Run the decision task. This function implements `mach taskgraph decision`,
+ and is responsible for
+
+ * processing decision task command-line options into parameters
+ * running task-graph generation exactly the same way the other `mach
+ taskgraph` commands do
+ * generating a set of artifacts to memorialize the graph
+ * calling TaskCluster APIs to create the graph
+ """
+
+ parameters = get_decision_parameters(options)
+
+ # create a TaskGraphGenerator instance
+ target_tasks_method = parameters.get('target_tasks_method', 'all_tasks')
+ target_tasks_method = get_method(target_tasks_method)
+ tgg = TaskGraphGenerator(
+ root_dir=options['root'],
+ parameters=parameters,
+ target_tasks_method=target_tasks_method)
+
+ # write out the parameters used to generate this graph
+ write_artifact('parameters.yml', dict(**parameters))
+
+ # write out the yml file for action tasks
+ write_artifact('action.yml', get_action_yml(parameters))
+
+ # write out the full graph for reference
+ full_task_json = tgg.full_task_graph.to_json()
+ write_artifact('full-task-graph.json', full_task_json)
+
+ # this is just a test to check whether the from_json() function is working
+ _, _ = TaskGraph.from_json(full_task_json)
+
+ # write out the target task set to allow reproducing this as input
+ write_artifact('target-tasks.json', tgg.target_task_set.tasks.keys())
+
+ # write out the optimized task graph to describe what will actually happen,
+ # and the map of labels to taskids
+ write_artifact('task-graph.json', tgg.optimized_task_graph.to_json())
+ write_artifact('label-to-taskid.json', tgg.label_to_taskid)
+
+ # actually create the graph
+ create_tasks(tgg.optimized_task_graph, tgg.label_to_taskid, parameters)
+
+
+def get_decision_parameters(options):
+ """
+ Load parameters from the command-line options for 'taskgraph decision'.
+ This also applies per-project parameters, based on the given project.
+    """
+ parameters = {n: options[n] for n in [
+ 'base_repository',
+ 'head_repository',
+ 'head_rev',
+ 'head_ref',
+ 'message',
+ 'project',
+ 'pushlog_id',
+ 'pushdate',
+ 'owner',
+ 'level',
+ 'triggered_by',
+ 'target_tasks_method',
+ ] if n in options}
+
+ # owner must be an email, but sometimes (e.g., for ffxbld) it is not, in which
+ # case, fake it
+ if '@' not in parameters['owner']:
+ parameters['owner'] += '@noreply.mozilla.org'
+
+ # use the pushdate as build_date if given, else use current time
+ parameters['build_date'] = parameters['pushdate'] or int(time.time())
+ # moz_build_date is the build identifier based on build_date
+ parameters['moz_build_date'] = time.strftime("%Y%m%d%H%M%S",
+ time.gmtime(parameters['build_date']))
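+    # e.g. a build_date of 0 renders moz_build_date as '19700101000000' (UTC)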
+
+ project = parameters['project']
+ try:
+ parameters.update(PER_PROJECT_PARAMETERS[project])
+ except KeyError:
+ logger.warning("using default project parameters; add {} to "
+ "PER_PROJECT_PARAMETERS in {} to customize behavior "
+ "for this project".format(project, __file__))
+ parameters.update(PER_PROJECT_PARAMETERS['default'])
+
+ # `target_tasks_method` has higher precedence than `project` parameters
+ if options.get('target_tasks_method'):
+ parameters['target_tasks_method'] = options['target_tasks_method']
+
+ return Parameters(parameters)
+
+
+def write_artifact(filename, data):
+ logger.info('writing artifact file `{}`'.format(filename))
+ if not os.path.isdir(ARTIFACTS_DIR):
+ os.mkdir(ARTIFACTS_DIR)
+ path = os.path.join(ARTIFACTS_DIR, filename)
+ if filename.endswith('.yml'):
+ with open(path, 'w') as f:
+ yaml.safe_dump(data, f, allow_unicode=True, default_flow_style=False)
+ elif filename.endswith('.json'):
+ with open(path, 'w') as f:
+ json.dump(data, f, sort_keys=True, indent=2, separators=(',', ': '))
+ else:
+ raise TypeError("Don't know how to write to {}".format(filename))
+
+
+def get_action_yml(parameters):
+ templates = Templates(os.path.join(GECKO, "taskcluster/taskgraph"))
+ action_parameters = parameters.copy()
+ action_parameters.update({
+ "decision_task_id": "{{decision_task_id}}",
+ "task_labels": "{{task_labels}}",
+ "from_now": json_time_from_now,
+ "now": current_json_time()
+ })
+ return templates.load('action.yml', action_parameters)
diff --git a/taskcluster/taskgraph/docker.py b/taskcluster/taskgraph/docker.py
new file mode 100644
index 000000000..8159fd5a4
--- /dev/null
+++ b/taskcluster/taskgraph/docker.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import json
+import os
+import subprocess
+import tarfile
+import tempfile
+import urllib2
+import which
+
+from taskgraph.util import docker
+
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
+IMAGE_DIR = os.path.join(GECKO, 'testing', 'docker')
+INDEX_URL = 'https://index.taskcluster.net/v1/task/' + docker.INDEX_PREFIX + '.{}.{}.hash.{}'
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+
+
+def load_image_by_name(image_name):
+ context_path = os.path.join(GECKO, 'testing', 'docker', image_name)
+ context_hash = docker.generate_context_hash(GECKO, context_path, image_name)
+
+ image_index_url = INDEX_URL.format('mozilla-central', image_name, context_hash)
+ print("Fetching", image_index_url)
+ task = json.load(urllib2.urlopen(image_index_url))
+
+ return load_image_by_task_id(task['taskId'])
+
+
+def load_image_by_task_id(task_id):
+ # because we need to read this file twice (and one read is not all the way
+ # through), it is difficult to stream it. So we download to disk and then
+ # read it back.
+ filename = 'temp-docker-image.tar'
+
+ artifact_url = ARTIFACT_URL.format(task_id, 'public/image.tar.zst')
+ print("Downloading", artifact_url)
+ tempfilename = 'temp-docker-image.tar.zst'
+ subprocess.check_call(['curl', '-#', '-L', '-o', tempfilename, artifact_url])
+ print("Decompressing")
+ subprocess.check_call(['zstd', '-d', tempfilename, '-o', filename])
+ print("Deleting temporary file")
+ os.unlink(tempfilename)
+
+ print("Determining image name")
+ tf = tarfile.open(filename)
+ repositories = json.load(tf.extractfile('repositories'))
+ name = repositories.keys()[0]
+ tag = repositories[name].keys()[0]
+ name = '{}:{}'.format(name, tag)
+ print("Image name:", name)
+
+ print("Loading image into docker")
+ try:
+ subprocess.check_call(['docker', 'load', '-i', filename])
+ except subprocess.CalledProcessError:
+ print("*** `docker load` failed. You may avoid re-downloading that tarball by fixing the")
+ print("*** problem and running `docker load < {}`.".format(filename))
+ raise
+
+ print("Deleting temporary file")
+ os.unlink(filename)
+
+ print("The requested docker image is now available as", name)
+ print("Try: docker run -ti --rm {} bash".format(name))
+
+
+def build_context(name, outputFile):
+ """Build a context.tar for image with specified name.
+ """
+ if not name:
+ raise ValueError('must provide a Docker image name')
+ if not outputFile:
+        raise ValueError('must provide an outputFile')
+
+ image_dir = os.path.join(IMAGE_DIR, name)
+ if not os.path.isdir(image_dir):
+ raise Exception('image directory does not exist: %s' % image_dir)
+
+ docker.create_context_tar(GECKO, image_dir, outputFile, "")
+
+
+def build_image(name):
+ """Build a Docker image of specified name.
+
+ Output from image building process will be printed to stdout.
+ """
+ if not name:
+ raise ValueError('must provide a Docker image name')
+
+ image_dir = os.path.join(IMAGE_DIR, name)
+ if not os.path.isdir(image_dir):
+ raise Exception('image directory does not exist: %s' % image_dir)
+
+ tag = docker.docker_image(name, default_version='latest')
+
+ docker_bin = which.which('docker')
+
+ # Verify that Docker is working.
+ try:
+ subprocess.check_output([docker_bin, '--version'])
+ except subprocess.CalledProcessError:
+ raise Exception('Docker server is unresponsive. Run `docker ps` and '
+ 'check that Docker is running')
+
+ # We obtain a context archive and build from that. Going through the
+ # archive creation is important: it normalizes things like file owners
+ # and mtimes to increase the chances that image generation is
+ # deterministic.
+ fd, context_path = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ docker.create_context_tar(GECKO, image_dir, context_path, name)
+ docker.build_from_context(docker_bin, context_path, name, tag)
+ finally:
+ os.unlink(context_path)
+
+ print('Successfully built %s and tagged with %s' % (name, tag))
+
+ if tag.endswith(':latest'):
+ print('*' * 50)
+ print('WARNING: no VERSION file found in image directory.')
+ print('Image is not suitable for deploying/pushing.')
+ print('Create an image suitable for deploying/pushing by creating')
+ print('a VERSION file in the image directory.')
+ print('*' * 50)
diff --git a/taskcluster/taskgraph/files_changed.py b/taskcluster/taskgraph/files_changed.py
new file mode 100644
index 000000000..973a62bb8
--- /dev/null
+++ b/taskcluster/taskgraph/files_changed.py
@@ -0,0 +1,65 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Support for optimizing tasks based on the set of files that have changed.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import requests
+from redo import retry
+from mozpack.path import match as mozpackmatch
+
+logger = logging.getLogger(__name__)
+_cache = {}
+
+
+def get_changed_files(repository, revision):
+ """
+ Get the set of files changed in the push headed by the given revision.
+ Responses are cached, so multiple calls with the same arguments are OK.
+ """
+ key = repository, revision
+ if key not in _cache:
+ url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
+ logger.debug("Querying version control for metadata: %s", url)
+
+ def get_automationrelevance():
+ response = requests.get(url, timeout=5)
+ return response.json()
+ contents = retry(get_automationrelevance, attempts=2, sleeptime=10)
+
+ logger.debug('{} commits influencing task scheduling:'
+ .format(len(contents['changesets'])))
+ changed_files = set()
+ for c in contents['changesets']:
+ logger.debug(" {cset} {desc}".format(
+ cset=c['node'][0:12],
+ desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
+ changed_files |= set(c['files'])
+
+ _cache[key] = changed_files
+ return _cache[key]
+
+
+def check(params, file_patterns):
+ """Determine whether any of the files changed in the indicated push to
+ https://hg.mozilla.org match any of the given file patterns."""
+ repository = params.get('head_repository')
+ revision = params.get('head_rev')
+ if not repository or not revision:
+ logger.warning("Missing `head_repository` or `head_rev` parameters; "
+ "assuming all files have changed")
+ return True
+
+ changed_files = get_changed_files(repository, revision)
+
+ for pattern in file_patterns:
+ for path in changed_files:
+ if mozpackmatch(path, pattern):
+ return True
+
+ return False
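+
+# Example: check(params, ['taskcluster/**']) returns True when any file
+# changed in the push lives under taskcluster/ (patterns use mozpack.path
+# matching).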
diff --git a/taskcluster/taskgraph/generator.py b/taskcluster/taskgraph/generator.py
new file mode 100644
index 000000000..809ed1f5c
--- /dev/null
+++ b/taskcluster/taskgraph/generator.py
@@ -0,0 +1,218 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+import logging
+import os
+import yaml
+
+from .graph import Graph
+from .taskgraph import TaskGraph
+from .optimize import optimize_task_graph
+from .util.python_path import find_object
+
+logger = logging.getLogger(__name__)
+
+
+class Kind(object):
+
+ def __init__(self, name, path, config):
+ self.name = name
+ self.path = path
+ self.config = config
+
+ def _get_impl_class(self):
+ # load the class defined by implementation
+ try:
+ impl = self.config['implementation']
+ except KeyError:
+ raise KeyError("{!r} does not define implementation".format(self.path))
+ return find_object(impl)
+
+ def load_tasks(self, parameters, loaded_tasks):
+ impl_class = self._get_impl_class()
+ return impl_class.load_tasks(self.name, self.path, self.config,
+ parameters, loaded_tasks)
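+
+    # Note: `implementation` in kind.yml is a python path of the form
+    # "module.path:ObjectName"; find_object() imports the module and returns
+    # the named object (hypothetical example:
+    # "taskgraph.task.transform:TransformTask").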
+
+
+class TaskGraphGenerator(object):
+ """
+ The central controller for taskgraph. This handles all phases of graph
+    generation. The task graph is generated from all of the kinds defined in
+ subdirectories of the generator's root directory.
+
+ Access to the results of this generation, as well as intermediate values at
+ various phases of generation, is available via properties. This encourages
+ the provision of all generation inputs at instance construction time.
+ """
+
+ # Task-graph generation is implemented as a Python generator that yields
+ # each "phase" of generation. This allows some mach subcommands to short-
+ # circuit generation of the entire graph by never completing the generator.
+
+ def __init__(self, root_dir, parameters,
+ target_tasks_method):
+ """
+ @param root_dir: root directory, with subdirectories for each kind
+ @param parameters: parameters for this task-graph generation
+ @type parameters: dict
+ @param target_tasks_method: function to determine the target_task_set;
+ see `./target_tasks.py`.
+ @type target_tasks_method: function
+ """
+
+ self.root_dir = root_dir
+ self.parameters = parameters
+ self.target_tasks_method = target_tasks_method
+
+ # this can be set up until the time the target task set is generated;
+ # it defaults to parameters['target_tasks']
+ self._target_tasks = parameters.get('target_tasks')
+
+ # start the generator
+ self._run = self._run()
+ self._run_results = {}
+
+ @property
+ def full_task_set(self):
+ """
+ The full task set: all tasks defined by any kind (a graph without edges)
+
+ @type: TaskGraph
+ """
+ return self._run_until('full_task_set')
+
+ @property
+ def full_task_graph(self):
+ """
+ The full task graph: the full task set, with edges representing
+ dependencies.
+
+ @type: TaskGraph
+ """
+ return self._run_until('full_task_graph')
+
+ @property
+ def target_task_set(self):
+ """
+        The set of targeted tasks (a graph without edges)
+
+ @type: TaskGraph
+ """
+ return self._run_until('target_task_set')
+
+ @property
+ def target_task_graph(self):
+ """
+        The set of targeted tasks and all of their dependencies
+
+ @type: TaskGraph
+ """
+ return self._run_until('target_task_graph')
+
+ @property
+ def optimized_task_graph(self):
+ """
+        The set of targeted tasks and all of their dependencies; tasks that
+ have been optimized out are either omitted or replaced with a Task
+ instance containing only a task_id.
+
+ @type: TaskGraph
+ """
+ return self._run_until('optimized_task_graph')
+
+ @property
+ def label_to_taskid(self):
+ """
+ A dictionary mapping task label to assigned taskId. This property helps
+ in interpreting `optimized_task_graph`.
+
+ @type: dictionary
+ """
+ return self._run_until('label_to_taskid')
+
+ def _load_kinds(self):
+ for path in os.listdir(self.root_dir):
+ path = os.path.join(self.root_dir, path)
+ if not os.path.isdir(path):
+ continue
+ kind_name = os.path.basename(path)
+
+ kind_yml = os.path.join(path, 'kind.yml')
+ if not os.path.exists(kind_yml):
+ continue
+
+ logger.debug("loading kind `{}` from `{}`".format(kind_name, path))
+ with open(kind_yml) as f:
+ config = yaml.load(f)
+
+ yield Kind(kind_name, path, config)
+
+ def _run(self):
+ logger.info("Loading kinds")
+ # put the kinds into a graph and sort topologically so that kinds are loaded
+ # in post-order
+ kinds = {kind.name: kind for kind in self._load_kinds()}
+ edges = set()
+ for kind in kinds.itervalues():
+ for dep in kind.config.get('kind-dependencies', []):
+ edges.add((kind.name, dep, 'kind-dependency'))
+ kind_graph = Graph(set(kinds), edges)
+
+ logger.info("Generating full task set")
+ all_tasks = {}
+ for kind_name in kind_graph.visit_postorder():
+ logger.debug("Loading tasks for kind {}".format(kind_name))
+ kind = kinds[kind_name]
+ new_tasks = kind.load_tasks(self.parameters, list(all_tasks.values()))
+ for task in new_tasks:
+ if task.label in all_tasks:
+ raise Exception("duplicate tasks with label " + task.label)
+ all_tasks[task.label] = task
+ logger.info("Generated {} tasks for kind {}".format(len(new_tasks), kind_name))
+ full_task_set = TaskGraph(all_tasks, Graph(set(all_tasks), set()))
+ yield 'full_task_set', full_task_set
+
+ logger.info("Generating full task graph")
+ edges = set()
+ for t in full_task_set:
+ for dep, depname in t.get_dependencies(full_task_set):
+ edges.add((t.label, dep, depname))
+
+ full_task_graph = TaskGraph(all_tasks,
+ Graph(full_task_set.graph.nodes, edges))
+ yield 'full_task_graph', full_task_graph
+
+ logger.info("Generating target task set")
+ target_tasks = set(self.target_tasks_method(full_task_graph, self.parameters))
+ target_task_set = TaskGraph(
+ {l: all_tasks[l] for l in target_tasks},
+ Graph(target_tasks, set()))
+ yield 'target_task_set', target_task_set
+
+ logger.info("Generating target task graph")
+ target_graph = full_task_graph.graph.transitive_closure(target_tasks)
+ target_task_graph = TaskGraph(
+ {l: all_tasks[l] for l in target_graph.nodes},
+ target_graph)
+ yield 'target_task_graph', target_task_graph
+
+ logger.info("Generating optimized task graph")
+ do_not_optimize = set()
+ if not self.parameters.get('optimize_target_tasks', True):
+ do_not_optimize = target_task_set.graph.nodes
+ optimized_task_graph, label_to_taskid = optimize_task_graph(target_task_graph,
+ self.parameters,
+ do_not_optimize)
+ yield 'label_to_taskid', label_to_taskid
+ yield 'optimized_task_graph', optimized_task_graph
+
+ def _run_until(self, name):
+ while name not in self._run_results:
+ try:
+ k, v = self._run.next()
+ except StopIteration:
+ raise AttributeError("No such run result {}".format(name))
+ self._run_results[k] = v
+ return self._run_results[name]
diff --git a/taskcluster/taskgraph/graph.py b/taskcluster/taskgraph/graph.py
new file mode 100644
index 000000000..731341c51
--- /dev/null
+++ b/taskcluster/taskgraph/graph.py
@@ -0,0 +1,117 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import collections
+
+
+class Graph(object):
+ """
+ Generic representation of a directed acyclic graph with labeled edges
+ connecting the nodes. Graph operations are implemented in a functional
+ manner, so the data structure is immutable.
+
+    It permits at most one edge of a given name between any two nodes. The
+ graph is not checked for cycles, and methods may hang or otherwise fail if
+ given a cyclic graph.
+
+ The `nodes` and `edges` attributes may be accessed in a read-only fashion.
+ The `nodes` attribute is a set of node names, while `edges` is a set of
+ `(left, right, name)` tuples representing an edge named `name` going from
+    node `left` to node `right`.
+ """
+
+ def __init__(self, nodes, edges):
+ """
+ Create a graph. Nodes and edges are both as described in the class
+ documentation. Both values are used by reference, and should not be
+ modified after building a graph.
+ """
+ assert isinstance(nodes, set)
+ assert isinstance(edges, set)
+ self.nodes = nodes
+ self.edges = edges
+
+ def __eq__(self, other):
+ return self.nodes == other.nodes and self.edges == other.edges
+
+ def __repr__(self):
+ return "<Graph nodes={!r} edges={!r}>".format(self.nodes, self.edges)
+
+ def transitive_closure(self, nodes):
+ """
+ Return the transitive closure of <nodes>: the graph containing all
+ specified nodes as well as any nodes reachable from them, and any
+ intervening edges.
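+
+        Example: with nodes {a, b, c} and edges {(a, b, 'dep'), (b, c, 'dep')},
+        transitive_closure({'a'}) contains all three nodes and both edges,
+        while transitive_closure({'b'}) contains only b, c and the (b, c) edge.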
+ """
+ assert isinstance(nodes, set)
+ assert nodes <= self.nodes
+
+ # generate a new graph by expanding along edges until reaching a fixed
+ # point
+ new_nodes, new_edges = nodes, set()
+ nodes, edges = set(), set()
+ while (new_nodes, new_edges) != (nodes, edges):
+ nodes, edges = new_nodes, new_edges
+ add_edges = set((left, right, name)
+ for (left, right, name) in self.edges
+ if left in nodes)
+ add_nodes = set(right for (_, right, _) in add_edges)
+ new_nodes = nodes | add_nodes
+ new_edges = edges | add_edges
+ return Graph(new_nodes, new_edges)
+
+ def visit_postorder(self):
+ """
+ Generate a sequence of nodes in postorder, such that every node is
+ visited *after* any nodes it links to.
+
+ Behavior is undefined (read: it will hang) if the graph contains a
+ cycle.
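+
+        Example: with edges {(a, b, 'dep')}, node b is yielded before a,
+        since a links to b.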
+ """
+ queue = collections.deque(sorted(self.nodes))
+ links_by_node = self.links_dict()
+ seen = set()
+ while queue:
+ node = queue.popleft()
+ if node in seen:
+ continue
+ links = links_by_node[node]
+ if all((n in seen) for n in links):
+ seen.add(node)
+ yield node
+ else:
+ queue.extend(n for n in links if n not in seen)
+ queue.append(node)
+
+ def links_dict(self):
+ """
+ Return a dictionary mapping each node to a set of the nodes it links to
+ (omitting edge names)
+ """
+ links = collections.defaultdict(set)
+ for left, right, _ in self.edges:
+ links[left].add(right)
+ return links
+
+ def named_links_dict(self):
+ """
+ Return a two-level dictionary mapping each node to a dictionary mapping
+ edge names to labels.
+ """
+ links = collections.defaultdict(dict)
+ for left, right, name in self.edges:
+ links[left][name] = right
+ return links
+
+ def reverse_links_dict(self):
+ """
+ Return a dictionary mapping each node to a set of the nodes linking to
+ it (omitting edge names)
+ """
+ links = collections.defaultdict(set)
+ for left, right, _ in self.edges:
+ links[right].add(left)
+ return links
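+
+# Example of the three link-dict views for edges {('a', 'b', 'build')}:
+#   links_dict()          -> {'a': set(['b'])}
+#   named_links_dict()    -> {'a': {'build': 'b'}}
+#   reverse_links_dict()  -> {'b': set(['a'])}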
diff --git a/taskcluster/taskgraph/optimize.py b/taskcluster/taskgraph/optimize.py
new file mode 100644
index 000000000..120e6807b
--- /dev/null
+++ b/taskcluster/taskgraph/optimize.py
@@ -0,0 +1,156 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+import logging
+import re
+
+from .graph import Graph
+from .taskgraph import TaskGraph
+from slugid import nice as slugid
+
+logger = logging.getLogger(__name__)
+TASK_REFERENCE_PATTERN = re.compile('<([^>]+)>')
+
+
+def optimize_task_graph(target_task_graph, params, do_not_optimize, existing_tasks=None):
+ """
+ Perform task optimization, without optimizing tasks named in
+ do_not_optimize.
+ """
+ named_links_dict = target_task_graph.graph.named_links_dict()
+ label_to_taskid = {}
+
+ # This proceeds in two phases. First, mark all optimized tasks (those
+ # which will be removed from the graph) as such, including a replacement
+ # taskId where applicable. Second, generate a new task graph containing
+ # only the non-optimized tasks, with all task labels resolved to taskIds
+ # and with task['dependencies'] populated.
+ annotate_task_graph(target_task_graph=target_task_graph,
+ params=params,
+ do_not_optimize=do_not_optimize,
+ named_links_dict=named_links_dict,
+ label_to_taskid=label_to_taskid,
+ existing_tasks=existing_tasks)
+ return get_subgraph(target_task_graph, named_links_dict, label_to_taskid), label_to_taskid
+
+
+def resolve_task_references(label, task_def, taskid_for_edge_name):
+ def repl(match):
+ key = match.group(1)
+ try:
+ return taskid_for_edge_name[key]
+ except KeyError:
+ # handle escaping '<'
+ if key == '<':
+ return key
+ raise KeyError("task '{}' has no dependency named '{}'".format(label, key))
+
+ def recurse(val):
+ if isinstance(val, list):
+ return [recurse(v) for v in val]
+ elif isinstance(val, dict):
+ if val.keys() == ['task-reference']:
+ return TASK_REFERENCE_PATTERN.sub(repl, val['task-reference'])
+ else:
+ return {k: recurse(v) for k, v in val.iteritems()}
+ else:
+ return val
+ return recurse(task_def)
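+
+# Example (illustrative ids): with taskid_for_edge_name = {'build': 'abc123'},
+# the value {'task-reference': 'index.<build>.latest'} resolves to the string
+# 'index.abc123.latest'; a literal '<' can be escaped as '<<>'.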
+
+
+def annotate_task_graph(target_task_graph, params, do_not_optimize,
+ named_links_dict, label_to_taskid, existing_tasks):
+ """
+ Annotate each task in the graph with .optimized (boolean) and .task_id
+ (possibly None), following the rules for optimization and calling the task
+ kinds' `optimize_task` method.
+
+ As a side effect, label_to_taskid is updated with labels for all optimized
+ tasks that are replaced with existing tasks.
+ """
+
+ # set .optimized for all tasks, and .task_id for optimized tasks
+ # with replacements
+ for label in target_task_graph.graph.visit_postorder():
+ task = target_task_graph.tasks[label]
+ named_task_dependencies = named_links_dict.get(label, {})
+
+ # check whether any dependencies have been optimized away
+ dependencies = [target_task_graph.tasks[l] for l in named_task_dependencies.itervalues()]
+ for t in dependencies:
+ if t.optimized and not t.task_id:
+ raise Exception(
+ "task {} was optimized away, but {} depends on it".format(
+ t.label, label))
+
+ # if this task is blacklisted, don't even consider optimizing
+ replacement_task_id = None
+ if label in do_not_optimize:
+ optimized = False
+ # Let's check whether this task has been created before
+ elif existing_tasks is not None and label in existing_tasks:
+ optimized = True
+ replacement_task_id = existing_tasks[label]
+ # otherwise, examine the task itself (which may be an expensive operation)
+ else:
+ optimized, replacement_task_id = task.optimize(params)
+
+ task.optimized = optimized
+ task.task_id = replacement_task_id
+ if replacement_task_id:
+ label_to_taskid[label] = replacement_task_id
+
+ if optimized:
+ if replacement_task_id:
+ logger.debug("optimizing `{}`, replacing with task `{}`"
+ .format(label, replacement_task_id))
+ else:
+ logger.debug("optimizing `{}` away".format(label))
+ # note: any dependent tasks will fail when they see this
+ else:
+ if replacement_task_id:
+                raise Exception("{}: optimize returned False with a taskId".format(label))
+
+
+def get_subgraph(annotated_task_graph, named_links_dict, label_to_taskid):
+ """
+ Return the subgraph of annotated_task_graph consisting only of
+ non-optimized tasks and edges between them.
+
+ To avoid losing track of taskIds for tasks optimized away, this method
+ simultaneously substitutes real taskIds for task labels in the graph, and
+ populates each task definition's `dependencies` key with the appropriate
+ taskIds. Task references are resolved in the process.
+ """
+
+ # resolve labels to taskIds and populate task['dependencies']
+ tasks_by_taskid = {}
+ for label in annotated_task_graph.graph.visit_postorder():
+ task = annotated_task_graph.tasks[label]
+ if task.optimized:
+ continue
+ task.task_id = label_to_taskid[label] = slugid()
+ named_task_dependencies = {
+ name: label_to_taskid[label]
+ for name, label in named_links_dict.get(label, {}).iteritems()}
+ task.task = resolve_task_references(task.label, task.task, named_task_dependencies)
+ task.task.setdefault('dependencies', []).extend(named_task_dependencies.itervalues())
+ tasks_by_taskid[task.task_id] = task
+
+ # resolve edges to taskIds
+ edges_by_taskid = (
+ (label_to_taskid.get(left), label_to_taskid.get(right), name)
+ for (left, right, name) in annotated_task_graph.graph.edges
+ )
+    # ...and drop edges that are no longer in the task graph
+ edges_by_taskid = set(
+ (left, right, name)
+ for (left, right, name) in edges_by_taskid
+ if left in tasks_by_taskid and right in tasks_by_taskid
+ )
+
+ return TaskGraph(
+ tasks_by_taskid,
+ Graph(set(tasks_by_taskid), edges_by_taskid))
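[Editor's note: a minimal sketch of how resolve_task_references above rewrites a
task definition. It assumes Python 2 (matching the module's iteritems and
dict.keys() usage) and that taskgraph.optimize is importable; the URL and
taskIds are invented for illustration.

    from taskgraph.optimize import resolve_task_references

    task_def = {'payload': {'env': {'BUILD_URL': {
        'task-reference': 'https://queue.taskcluster.net/v1/task/'
                          '<build>/artifacts/public/build.tar.bz2'}}}}
    resolved = resolve_task_references('test-linux64/opt', task_def,
                                       {'build': 'abc123'})
    # resolved['payload']['env']['BUILD_URL'] is now a plain string with
    # 'abc123' substituted for '<build>'; a '<<>' reference escapes to '<'.
]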
diff --git a/taskcluster/taskgraph/parameters.py b/taskcluster/taskgraph/parameters.py
new file mode 100644
index 000000000..51b9e77c7
--- /dev/null
+++ b/taskcluster/taskgraph/parameters.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import json
+import yaml
+from mozbuild.util import ReadOnlyDict
+
+# Please keep this list sorted and in sync with taskcluster/docs/parameters.rst
+PARAMETER_NAMES = set([
+ 'base_repository',
+ 'build_date',
+ 'head_ref',
+ 'head_repository',
+ 'head_rev',
+ 'level',
+ 'message',
+ 'moz_build_date',
+ 'optimize_target_tasks',
+ 'owner',
+ 'project',
+ 'pushdate',
+ 'pushlog_id',
+ 'target_tasks_method',
+ 'triggered_by',
+])
+
+
+class Parameters(ReadOnlyDict):
+ """An immutable dictionary with nicer KeyError messages on failure"""
+ def check(self):
+ names = set(self)
+ msg = []
+
+ missing = PARAMETER_NAMES - names
+ if missing:
+ msg.append("missing parameters: " + ", ".join(missing))
+
+ extra = names - PARAMETER_NAMES
+ if extra:
+ msg.append("extra parameters: " + ", ".join(extra))
+
+ if msg:
+ raise Exception("; ".join(msg))
+
+ def __getitem__(self, k):
+ if k not in PARAMETER_NAMES:
+ raise KeyError("no such parameter {!r}".format(k))
+ try:
+ return super(Parameters, self).__getitem__(k)
+ except KeyError:
+ raise KeyError("taskgraph parameter {!r} not found".format(k))
+
+
+def load_parameters_file(options):
+ """
+ Load parameters from the --parameters option
+ """
+ filename = options['parameters']
+ if not filename:
+ return Parameters()
+ with open(filename) as f:
+ if filename.endswith('.yml'):
+ return Parameters(**yaml.safe_load(f))
+ elif filename.endswith('.json'):
+ return Parameters(**json.load(f))
+ else:
+ raise TypeError("Parameters file `{}` is not JSON or YAML".format(filename))
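[Editor's note: a sketch of how the validation above behaves, assuming
mozbuild is importable. The parameter names are the real ones from
PARAMETER_NAMES; the values are made up.

    from taskgraph.parameters import Parameters

    params = Parameters(project='mozilla-central', level='3')
    try:
        params.check()
    except Exception as e:
        print(e)  # "missing parameters: base_repository, build_date, ..."
    params['project']  # 'mozilla-central'
    # params['no_such_key'] would raise KeyError("no such parameter ...")
]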
diff --git a/taskcluster/taskgraph/target_tasks.py b/taskcluster/taskgraph/target_tasks.py
new file mode 100644
index 000000000..d2b3f5a7f
--- /dev/null
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+from taskgraph import try_option_syntax
+
+INTEGRATION_PROJECTS = set([
+ 'mozilla-inbound',
+ 'autoland',
+])
+
+RELEASE_PROJECTS = set([
+ 'mozilla-central',
+ 'mozilla-aurora',
+ 'mozilla-beta',
+ 'mozilla-release',
+ 'mozilla-esr52',
+])
+
+_target_task_methods = {}
+
+
+def _target_task(name):
+ def wrap(func):
+ _target_task_methods[name] = func
+ return func
+ return wrap
+
+
+def get_method(method):
+ """Get a target_task_method to pass to a TaskGraphGenerator."""
+ return _target_task_methods[method]
+
+
+@_target_task('try_option_syntax')
+def target_tasks_try_option_syntax(full_task_graph, parameters):
+ """Generate a list of target tasks based on try syntax in
+ parameters['message'] and, for context, the full task graph."""
+ options = try_option_syntax.TryOptionSyntax(parameters['message'], full_task_graph)
+ target_tasks_labels = [t.label for t in full_task_graph.tasks.itervalues()
+ if options.task_matches(t.attributes)]
+
+    # If the developer wants test jobs to be rebuilt N times, we add that value here
+ if int(options.trigger_tests) > 1:
+ for l in target_tasks_labels:
+ task = full_task_graph[l]
+ if 'unittest_suite' in task.attributes:
+ task.attributes['task_duplicates'] = options.trigger_tests
+
+ # Add notifications here as well
+ if options.notifications:
+ for task in full_task_graph:
+ owner = parameters.get('owner')
+ routes = task.task.setdefault('routes', [])
+ if options.notifications == 'all':
+ routes.append("notify.email.{}.on-any".format(owner))
+ elif options.notifications == 'failure':
+ routes.append("notify.email.{}.on-failed".format(owner))
+ routes.append("notify.email.{}.on-exception".format(owner))
+
+ return target_tasks_labels
+
+
+@_target_task('default')
+def target_tasks_default(full_task_graph, parameters):
+ """Target the tasks which have indicated they should be run on this project
+ via the `run_on_projects` attributes."""
+    def filter(task):
+        run_on_projects = set(task.attributes.get('run_on_projects', []))
+ if 'all' in run_on_projects:
+ return True
+ project = parameters['project']
+ if 'integration' in run_on_projects:
+ if project in INTEGRATION_PROJECTS:
+ return True
+ if 'release' in run_on_projects:
+ if project in RELEASE_PROJECTS:
+ return True
+ return project in run_on_projects
+ return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]
+
+
+@_target_task('ash_tasks')
+def target_tasks_ash(full_task_graph, parameters):
+ """Target tasks that only run on the ash branch."""
+ def filter(task):
+ platform = task.attributes.get('build_platform')
+ # only select platforms
+ if platform not in ('linux64', 'linux64-asan', 'linux64-pgo'):
+ return False
+ # and none of this linux64-asan/debug stuff
+ if platform == 'linux64-asan' and task.attributes['build_type'] == 'debug':
+ return False
+        # no non-e10s tests
+        if task.attributes.get('unittest_suite') or task.attributes.get('talos_suite'):
+ if not task.attributes.get('e10s'):
+ return False
+ # don't upload symbols
+ if task.attributes['kind'] == 'upload-symbols':
+ return False
+ return True
+ return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]
+
+
+@_target_task('cedar_tasks')
+def target_tasks_cedar(full_task_graph, parameters):
+ """Target tasks that only run on the cedar branch."""
+ def filter(task):
+ platform = task.attributes.get('build_platform')
+ # only select platforms
+ if platform not in ['linux64']:
+ return False
+ if task.attributes.get('unittest_suite'):
+ if not (task.attributes['unittest_suite'].startswith('mochitest')
+ or 'xpcshell' in task.attributes['unittest_suite']):
+ return False
+ return True
+ return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]
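[Editor's note: new filters are registered with the _target_task decorator
above and looked up by name via get_method. A hypothetical example; the
'docs_only' name and the kind test are invented for illustration.

    from taskgraph.target_tasks import _target_task, get_method

    @_target_task('docs_only')
    def target_tasks_docs_only(full_task_graph, parameters):
        """Target only documentation tasks (hypothetical kind name)."""
        return [l for l, t in full_task_graph.tasks.iteritems()
                if t.attributes.get('kind') == 'docs']

    method = get_method('docs_only')  # the decision task resolves methods by name
]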
diff --git a/taskcluster/taskgraph/task/__init__.py b/taskcluster/taskgraph/task/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/task/__init__.py
diff --git a/taskcluster/taskgraph/task/base.py b/taskcluster/taskgraph/task/base.py
new file mode 100644
index 000000000..2d9cbf5d9
--- /dev/null
+++ b/taskcluster/taskgraph/task/base.py
@@ -0,0 +1,108 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import abc
+
+
+class Task(object):
+ """
+ Representation of a task in a TaskGraph. Each Task has, at creation:
+
+ - kind: the name of the task kind
+    - label: the label for this task
+ - attributes: a dictionary of attributes for this task (used for filtering)
+ - task: the task definition (JSON-able dictionary)
+
+ And later, as the task-graph processing proceeds:
+
+    - task_id: the TaskCluster taskId under which this task will be created
+    - optimized: true if this task need not be performed
+
+ A kind represents a collection of tasks that share common characteristics.
+    For example, all build jobs. Each instance of a kind is initialized with a
+ path from which it draws its task configuration. The instance is free to
+ store as much local state as it needs.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, kind, label, attributes, task):
+ self.kind = kind
+ self.label = label
+ self.attributes = attributes
+ self.task = task
+
+ self.task_id = None
+ self.optimized = False
+
+ self.attributes['kind'] = kind
+
+ def __eq__(self, other):
+ return self.kind == other.kind and \
+ self.label == other.label and \
+ self.attributes == other.attributes and \
+ self.task == other.task and \
+ self.task_id == other.task_id
+
+ @classmethod
+ @abc.abstractmethod
+ def load_tasks(cls, kind, path, config, parameters, loaded_tasks):
+ """
+ Load the tasks for a given kind.
+
+ The `kind` is the name of the kind; the configuration for that kind
+ named this class.
+
+ The `path` is the path to the configuration directory for the kind. This
+ can be used to load extra data, templates, etc.
+
+ The `parameters` give details on which to base the task generation.
+ See `taskcluster/docs/parameters.rst` for details.
+
+ At the time this method is called, all kinds on which this kind depends
+        (that is, specified in the `kind-dependencies` key in `self.config`)
+ have already loaded their tasks, and those tasks are available in
+ the list `loaded_tasks`.
+
+ The return value is a list of Task instances.
+ """
+
+ @abc.abstractmethod
+ def get_dependencies(self, taskgraph):
+ """
+ Get the set of task labels this task depends on, by querying the full
+ task set, given as `taskgraph`.
+
+ Returns a list of (task_label, dependency_name) pairs describing the
+ dependencies.
+ """
+
+ def optimize(self, params):
+ """
+ Determine whether this task can be optimized, and if it can, what taskId
+ it should be replaced with.
+
+ The return value is a tuple `(optimized, taskId)`. If `optimized` is
+ true, then the task will be optimized (in other words, not included in
+ the task graph). If the second argument is a taskid, then any
+        dependencies on this task will instead depend on that taskId. It is an
+ error to return no taskId for a task on which other tasks depend.
+
+ The default never optimizes.
+ """
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ """
+ Given a data structure as produced by taskgraph.to_json, re-construct
+ the original Task object. This is used to "resume" the task-graph
+ generation process, for example in Action tasks.
+ """
+ return cls(
+ kind=task_dict['attributes']['kind'],
+ label=task_dict['label'],
+ attributes=task_dict['attributes'],
+ task=task_dict['task'])
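[Editor's note: a minimal concrete subclass, sketched to show the abstract
interface above; the 'hello' kind and its single task are invented.

    from taskgraph.task import base

    class HelloTask(base.Task):
        @classmethod
        def load_tasks(cls, kind, path, config, parameters, loaded_tasks):
            task_def = {'payload': {'command': ['echo', 'hello']}}
            return [cls(kind, label='hello-world', attributes={}, task=task_def)]

        def get_dependencies(self, taskgraph):
            return []  # no dependencies; optimize() keeps the never-optimize default
]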
diff --git a/taskcluster/taskgraph/task/docker_image.py b/taskcluster/taskgraph/task/docker_image.py
new file mode 100644
index 000000000..fd67c4832
--- /dev/null
+++ b/taskcluster/taskgraph/task/docker_image.py
@@ -0,0 +1,130 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import json
+import os
+import urllib2
+
+from . import base
+from taskgraph.util.docker import (
+ docker_image,
+ generate_context_hash,
+ INDEX_PREFIX,
+)
+from taskgraph.util.templates import Templates
+
+logger = logging.getLogger(__name__)
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+INDEX_URL = 'https://index.taskcluster.net/v1/task/{}'
+
+
+class DockerImageTask(base.Task):
+
+ def __init__(self, *args, **kwargs):
+ self.index_paths = kwargs.pop('index_paths')
+ super(DockerImageTask, self).__init__(*args, **kwargs)
+
+ def __eq__(self, other):
+ return super(DockerImageTask, self).__eq__(other) and \
+ self.index_paths == other.index_paths
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, params, loaded_tasks):
+ parameters = {
+ 'pushlog_id': params.get('pushlog_id', 0),
+ 'pushdate': params['moz_build_date'],
+ 'pushtime': params['moz_build_date'][8:],
+ 'year': params['moz_build_date'][0:4],
+ 'month': params['moz_build_date'][4:6],
+ 'day': params['moz_build_date'][6:8],
+ 'project': params['project'],
+ 'docker_image': docker_image,
+ 'base_repository': params['base_repository'] or params['head_repository'],
+ 'head_repository': params['head_repository'],
+ 'head_ref': params['head_ref'] or params['head_rev'],
+ 'head_rev': params['head_rev'],
+ 'owner': params['owner'],
+ 'level': params['level'],
+ 'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
+ .format(repo=params['head_repository'], rev=params['head_rev']),
+ 'index_image_prefix': INDEX_PREFIX,
+ 'artifact_path': 'public/image.tar.zst',
+ }
+
+ tasks = []
+ templates = Templates(path)
+ for image_name, image_symbol in config['images'].iteritems():
+ context_path = os.path.join('testing', 'docker', image_name)
+ context_hash = generate_context_hash(GECKO, context_path, image_name)
+
+ image_parameters = dict(parameters)
+ image_parameters['image_name'] = image_name
+ image_parameters['context_hash'] = context_hash
+
+ image_task = templates.load('image.yml', image_parameters)
+ attributes = {'image_name': image_name}
+
+            # use a unique treeherder symbol for each docker image
+ if 'extra' in image_task['task']:
+ image_task['task']['extra']['treeherder']['symbol'] = image_symbol
+
+            # As an optimization, if the context hash already exists at a higher
+            # level, that image task ID is reused. The reasoning: everything
+            # eventually ends up on level 3, so if most tasks share a common
+            # image for a given context hash, a worker within Taskcluster does
+            # not need to keep a separate copy of the same image per branch.
+ index_paths = ['{}.level-{}.{}.hash.{}'.format(
+ INDEX_PREFIX, level, image_name, context_hash)
+ for level in range(int(params['level']), 4)]
+
+ tasks.append(cls(kind, 'build-docker-image-' + image_name,
+ task=image_task['task'], attributes=attributes,
+ index_paths=index_paths))
+
+ return tasks
+
+ def get_dependencies(self, taskgraph):
+ return []
+
+ def optimize(self, params):
+ for index_path in self.index_paths:
+ try:
+ url = INDEX_URL.format(index_path)
+ existing_task = json.load(urllib2.urlopen(url))
+                # Only return the task ID if the artifact exists for the indexed
+                # task; otherwise continue to the next branch. Trying the other
+                # branches covers the case where mozilla-central has an expired
+                # artifact but another project does not. No task ID is returned
+                # only once every branch has been tried.
+ request = urllib2.Request(
+ ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar.zst'))
+ request.get_method = lambda: 'HEAD'
+ urllib2.urlopen(request)
+
+ # HEAD success on the artifact is enough
+ return True, existing_task['taskId']
+ except urllib2.HTTPError:
+ pass
+
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ # Generating index_paths for optimization
+ imgMeta = task_dict['task']['extra']['imageMeta']
+ image_name = imgMeta['imageName']
+ context_hash = imgMeta['contextHash']
+ index_paths = ['{}.level-{}.{}.hash.{}'.format(
+ INDEX_PREFIX, level, image_name, context_hash)
+ for level in range(int(imgMeta['level']), 4)]
+ docker_image_task = cls(kind='docker-image',
+ label=task_dict['label'],
+ attributes=task_dict['attributes'],
+ task=task_dict['task'],
+ index_paths=index_paths)
+ return docker_image_task
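[Editor's note: a worked example of the index-path fan-out used by optimize()
above: a push at level 1 probes levels 1 through 3, so an image already
indexed at level 3 (e.g. from mozilla-central) is reused. The prefix value,
image name, and hash here are stand-ins, not the real constants.

    INDEX_PREFIX = 'docker.images.v2'  # assumed value, for illustration only
    context_hash = 'deadbeef'          # made-up context hash
    index_paths = ['{}.level-{}.{}.hash.{}'.format(
                       INDEX_PREFIX, level, 'desktop-build', context_hash)
                   for level in range(1, 4)]
    # ['docker.images.v2.level-1.desktop-build.hash.deadbeef',
    #  'docker.images.v2.level-2.desktop-build.hash.deadbeef',
    #  'docker.images.v2.level-3.desktop-build.hash.deadbeef']
]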
diff --git a/taskcluster/taskgraph/task/post_build.py b/taskcluster/taskgraph/task/post_build.py
new file mode 100644
index 000000000..09c76c44a
--- /dev/null
+++ b/taskcluster/taskgraph/task/post_build.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public License,
+# v. 2.0. If a copy of the MPL was not distributed with this file, You can
+# obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import copy
+import logging
+
+from . import transform
+from ..util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+
+class PostBuildTask(transform.TransformTask):
+ """
+    A task implementing a post-build job. These depend on builds and perform
+    various follow-up tasks after a build has completed.
+
+ The `only-for-build-platforms` kind configuration, if specified, will limit
+ the build platforms for which a post-build task will be created.
+
+    The `job-template` kind configuration points to a yaml file which will
+    be used to create the input to the transforms. Two keys are added to it:
+    `build-label`, the label of the build task, and `build-platform`, its
+    platform.
+ """
+
+ @classmethod
+ def get_inputs(cls, kind, path, config, params, loaded_tasks):
+ if config.get('kind-dependencies', []) != ["build"]:
+ raise Exception("PostBuildTask kinds must depend on builds")
+
+ only_platforms = config.get('only-for-build-platforms')
+ prototype = load_yaml(path, config.get('job-template'))
+
+ for task in loaded_tasks:
+ if task.kind != 'build':
+ continue
+
+ build_platform = task.attributes.get('build_platform')
+ build_type = task.attributes.get('build_type')
+ if not build_platform or not build_type:
+ continue
+ platform = "{}/{}".format(build_platform, build_type)
+ if only_platforms and platform not in only_platforms:
+ continue
+
+ post_task = copy.deepcopy(prototype)
+ post_task['build-label'] = task.label
+ post_task['build-platform'] = platform
+ yield post_task
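[Editor's note: the platform gating in get_inputs reduces to a simple
membership test; a standalone sketch with invented labels and platforms.

    from __future__ import print_function

    only_platforms = ['linux64/opt', 'macosx64/opt']
    builds = [('build-linux64/opt', 'linux64', 'opt'),
              ('build-linux64/debug', 'linux64', 'debug')]
    for label, build_platform, build_type in builds:
        platform = '{}/{}'.format(build_platform, build_type)
        if platform not in only_platforms:
            continue
        print(label, platform)  # only build-linux64/opt passes the filter
]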
diff --git a/taskcluster/taskgraph/task/signing.py b/taskcluster/taskgraph/task/signing.py
new file mode 100644
index 000000000..a2a9ae3d6
--- /dev/null
+++ b/taskcluster/taskgraph/task/signing.py
@@ -0,0 +1,64 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import os
+
+from . import base
+from taskgraph.util.templates import Templates
+
+
+logger = logging.getLogger(__name__)
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+INDEX_URL = 'https://index.taskcluster.net/v1/task/{}'
+
+
+class SigningTask(base.Task):
+
+ def __init__(self, kind, name, task, attributes):
+ self.unsigned_artifact_label = task['unsigned-task']['label']
+ super(SigningTask, self).__init__(kind, name, task=task['task'],
+ attributes=attributes)
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, params, loaded_tasks):
+ root = os.path.abspath(path)
+
+ tasks = []
+ for filename in config.get('jobs-from', []):
+ templates = Templates(root)
+ jobs = templates.load(filename, {})
+
+ for name, job in jobs.iteritems():
+ for artifact in job['unsigned-task']['artifacts']:
+ url = ARTIFACT_URL.format('<{}>'.format('unsigned-artifact'), artifact)
+ job['task']['payload']['unsignedArtifacts'].append({
+ 'task-reference': url
+ })
+ attributes = job.setdefault('attributes', {})
+ attributes.update({'kind': 'signing'})
+ tasks.append(cls(kind, name, job, attributes=attributes))
+
+ return tasks
+
+ def get_dependencies(self, taskgraph):
+ return [(self.unsigned_artifact_label, 'unsigned-artifact')]
+
+ def optimize(self, params):
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ unsigned_task_label = task_dict['dependencies']['unsigned-artifact']
+ task_dict['unsigned-task'] = {
+ 'label': unsigned_task_label
+ }
+ signing_task = cls(kind='build-signing',
+ name=task_dict['label'],
+ attributes=task_dict['attributes'],
+ task=task_dict)
+ return signing_task
diff --git a/taskcluster/taskgraph/task/test.py b/taskcluster/taskgraph/task/test.py
new file mode 100644
index 000000000..928f32a5a
--- /dev/null
+++ b/taskcluster/taskgraph/task/test.py
@@ -0,0 +1,112 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import copy
+import logging
+
+from . import transform
+from ..util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+
+class TestTask(transform.TransformTask):
+ """
+ A task implementing a Gecko test.
+ """
+
+ @classmethod
+ def get_inputs(cls, kind, path, config, params, loaded_tasks):
+
+ # the kind on which this one depends
+ if len(config.get('kind-dependencies', [])) != 1:
+ raise Exception("TestTask kinds must have exactly one item in kind-dependencies")
+ dep_kind = config['kind-dependencies'][0]
+
+ # get build tasks, keyed by build platform
+ builds_by_platform = cls.get_builds_by_platform(dep_kind, loaded_tasks)
+
+ # get the test platforms for those build tasks
+ test_platforms_cfg = load_yaml(path, 'test-platforms.yml')
+ test_platforms = cls.get_test_platforms(test_platforms_cfg, builds_by_platform)
+
+ # expand the test sets for each of those platforms
+ test_sets_cfg = load_yaml(path, 'test-sets.yml')
+ test_platforms = cls.expand_tests(test_sets_cfg, test_platforms)
+
+ # load the test descriptions
+ test_descriptions = load_yaml(path, 'tests.yml')
+
+ # generate all tests for all test platforms
+ for test_platform_name, test_platform in test_platforms.iteritems():
+ for test_name in test_platform['test-names']:
+ test = copy.deepcopy(test_descriptions[test_name])
+ test['build-platform'] = test_platform['build-platform']
+ test['test-platform'] = test_platform_name
+ test['build-label'] = test_platform['build-label']
+ test['test-name'] = test_name
+
+ logger.debug("Generating tasks for {} test {} on platform {}".format(
+ kind, test_name, test['test-platform']))
+ yield test
+
+ @classmethod
+ def get_builds_by_platform(cls, dep_kind, loaded_tasks):
+ """Find the build tasks on which tests will depend, keyed by
+ platform/type. Returns a dictionary mapping build platform to task
+ label."""
+ builds_by_platform = {}
+ for task in loaded_tasks:
+ if task.kind != dep_kind:
+ continue
+
+ build_platform = task.attributes.get('build_platform')
+ build_type = task.attributes.get('build_type')
+ if not build_platform or not build_type:
+ continue
+ platform = "{}/{}".format(build_platform, build_type)
+ if platform in builds_by_platform:
+ raise Exception("multiple build jobs for " + platform)
+ builds_by_platform[platform] = task.label
+ return builds_by_platform
+
+ @classmethod
+ def get_test_platforms(cls, test_platforms_cfg, builds_by_platform):
+ """Get the test platforms for which test tasks should be generated,
+ based on the available build platforms. Returns a dictionary mapping
+ test platform to {test-set, build-platform, build-label}."""
+ test_platforms = {}
+ for test_platform, cfg in test_platforms_cfg.iteritems():
+ build_platform = cfg['build-platform']
+ if build_platform not in builds_by_platform:
+ logger.warning(
+ "No build task with platform {}; ignoring test platform {}".format(
+ build_platform, test_platform))
+ continue
+ test_platforms[test_platform] = {
+ 'test-set': cfg['test-set'],
+ 'build-platform': build_platform,
+ 'build-label': builds_by_platform[build_platform],
+ }
+ return test_platforms
+
+ @classmethod
+ def expand_tests(cls, test_sets_cfg, test_platforms):
+ """Expand the test sets in `test_platforms` out to sets of test names.
+ Returns a dictionary like `get_test_platforms`, with an additional
+ `test-names` key for each test platform, containing a set of test
+ names."""
+ rv = {}
+ for test_platform, cfg in test_platforms.iteritems():
+ test_set = cfg['test-set']
+ if test_set not in test_sets_cfg:
+ raise Exception(
+ "Test set '{}' for test platform {} is not defined".format(
+ test_set, test_platform))
+ test_names = test_sets_cfg[test_set]
+ rv[test_platform] = cfg.copy()
+ rv[test_platform]['test-names'] = test_names
+ return rv
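[Editor's note: the effect of expand_tests above, restated with toy data; the
test-set and suite names are invented. This inlines the same
copy-and-annotate step rather than importing the class.

    test_sets_cfg = {'common': ['mochitest', 'xpcshell']}
    test_platforms = {'linux64/opt': {'test-set': 'common',
                                      'build-platform': 'linux64/opt',
                                      'build-label': 'build-linux64/opt'}}
    rv = {}
    for test_platform, cfg in test_platforms.items():
        entry = cfg.copy()
        entry['test-names'] = test_sets_cfg[cfg['test-set']]
        rv[test_platform] = entry
    # rv['linux64/opt']['test-names'] == ['mochitest', 'xpcshell']
]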
diff --git a/taskcluster/taskgraph/task/transform.py b/taskcluster/taskgraph/task/transform.py
new file mode 100644
index 000000000..8183254a0
--- /dev/null
+++ b/taskcluster/taskgraph/task/transform.py
@@ -0,0 +1,109 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import itertools
+
+from . import base
+from .. import files_changed
+from ..util.python_path import find_object
+from ..util.templates import merge
+from ..util.yaml import load_yaml
+from ..util.seta import is_low_value_task
+
+from ..transforms.base import TransformSequence, TransformConfig
+
+logger = logging.getLogger(__name__)
+
+
+class TransformTask(base.Task):
+ """
+ Tasks of this class are generated by applying transformations to a sequence
+    of input entities. By default, the inputs come from YAML data in the
+    kind directory, but subclasses may override `get_inputs` to produce them in
+ some other way.
+ """
+
+ @classmethod
+ def get_inputs(cls, kind, path, config, params, loaded_tasks):
+ """
+ Get the input elements that will be transformed into tasks. The
+ elements themselves are free-form, and become the input to the first
+ transform.
+
+ By default, this reads jobs from the `jobs` key, or from yaml files
+        named by `jobs-from`. The entities are read from mappings, and the
+        keys of those mappings are added as the `name` key of each entity.
+
+ If there is a `job-defaults` config, then every job is merged with it.
+ This provides a simple way to set default values for all jobs of a
+ kind. More complex defaults should be implemented with custom
+ transforms.
+
+ This method can be overridden in subclasses that need to perform more
+ complex calculations to generate the list of inputs.
+ """
+ def jobs():
+ defaults = config.get('job-defaults')
+ jobs = config.get('jobs', {}).iteritems()
+ jobs_from = itertools.chain.from_iterable(
+ load_yaml(path, filename).iteritems()
+ for filename in config.get('jobs-from', {}))
+ for name, job in itertools.chain(jobs, jobs_from):
+ if defaults:
+ job = merge(defaults, job)
+ yield name, job
+
+ for name, job in jobs():
+ job['name'] = name
+ logger.debug("Generating tasks for {} {}".format(kind, name))
+ yield job
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, params, loaded_tasks):
+ inputs = cls.get_inputs(kind, path, config, params, loaded_tasks)
+
+ transforms = TransformSequence()
+ for xform_path in config['transforms']:
+ transform = find_object(xform_path)
+ transforms.add(transform)
+
+ # perform the transformations
+ trans_config = TransformConfig(kind, path, config, params)
+ tasks = [cls(kind, t) for t in transforms(trans_config, inputs)]
+ return tasks
+
+ def __init__(self, kind, task):
+ self.dependencies = task['dependencies']
+ self.when = task['when']
+ super(TransformTask, self).__init__(kind, task['label'],
+ task['attributes'], task['task'])
+
+ def get_dependencies(self, taskgraph):
+ return [(label, name) for name, label in self.dependencies.items()]
+
+ def optimize(self, params):
+ if 'files-changed' in self.when:
+ changed = files_changed.check(
+ params, self.when['files-changed'])
+ if not changed:
+ logger.debug('no files found matching a pattern in `when.files-changed` for ' +
+ self.label)
+ return True, None
+
+        # A high-value task returns (False, None) here and is kept; a
+        # low-value task returns (True, None) and is optimized away.
+ if is_low_value_task(self.label, params.get('project')):
+ # Always optimize away low-value tasks
+ return True, None
+ else:
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ # when reading back from JSON, we lose the "when" information
+ task_dict['when'] = {}
+ return cls(task_dict['attributes']['kind'], task_dict)
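[Editor's note: the job-defaults merge described in get_inputs, sketched with
invented jobs; merge is taskgraph.util.templates.merge, imported above, and
later arguments win, so per-job keys override the defaults.

    from taskgraph.util.templates import merge

    defaults = {'worker-type': 'some-worker', 'tier': 2}
    job = {'tier': 1, 'description': 'an example job'}
    merged = merge(defaults, job)
    # {'worker-type': 'some-worker', 'tier': 1, 'description': 'an example job'}
]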
diff --git a/taskcluster/taskgraph/taskgraph.py b/taskcluster/taskgraph/taskgraph.py
new file mode 100644
index 000000000..7736745ef
--- /dev/null
+++ b/taskcluster/taskgraph/taskgraph.py
@@ -0,0 +1,82 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+
+from .graph import Graph
+from .util.python_path import find_object
+
+TASKCLUSTER_QUEUE_URL = "https://queue.taskcluster.net/v1/task/"
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
+
+
+class TaskGraph(object):
+ """
+ Representation of a task graph.
+
+ A task graph is a combination of a Graph and a dictionary of tasks indexed
+ by label. TaskGraph instances should be treated as immutable.
+ """
+
+ def __init__(self, tasks, graph):
+ assert set(tasks) == graph.nodes
+ self.tasks = tasks
+ self.graph = graph
+
+ def to_json(self):
+ "Return a JSON-able object representing the task graph, as documented"
+ named_links_dict = self.graph.named_links_dict()
+ # this dictionary may be keyed by label or by taskid, so let's just call it 'key'
+ tasks = {}
+ for key in self.graph.visit_postorder():
+ task = self.tasks[key]
+ implementation = task.__class__.__module__ + ":" + task.__class__.__name__
+ task_json = {
+ 'label': task.label,
+ 'attributes': task.attributes,
+ 'dependencies': named_links_dict.get(key, {}),
+ 'task': task.task,
+ 'kind_implementation': implementation
+ }
+ if task.task_id:
+ task_json['task_id'] = task.task_id
+ tasks[key] = task_json
+ return tasks
+
+ def __getitem__(self, label):
+ "Get a task by label"
+ return self.tasks[label]
+
+ def __iter__(self):
+ "Iterate over tasks in undefined order"
+ return self.tasks.itervalues()
+
+ def __repr__(self):
+ return "<TaskGraph graph={!r} tasks={!r}>".format(self.graph, self.tasks)
+
+ def __eq__(self, other):
+ return self.tasks == other.tasks and self.graph == other.graph
+
+ @classmethod
+ def from_json(cls, tasks_dict):
+ """
+        Reconstruct a TaskGraph from a dictionary representation such as that
+        produced by to_json. Returns a (tasks, task_graph) tuple.
+ """
+ tasks = {}
+ edges = set()
+ for key, value in tasks_dict.iteritems():
+ # We get the implementation from JSON
+ implementation = value['kind_implementation']
+ # Loading the module and creating a Task from a dictionary
+ task_kind = find_object(implementation)
+ tasks[key] = task_kind.from_json(value)
+ if 'task_id' in value:
+ tasks[key].task_id = value['task_id']
+ for depname, dep in value['dependencies'].iteritems():
+ edges.add((key, dep, depname))
+ task_graph = cls(tasks, Graph(set(tasks), edges))
+ return tasks, task_graph
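[Editor's note: a minimal to_json/from_json round trip. It uses the TestTask
helper that the unit tests below import from taskcluster/taskgraph/test/util.py
(not part of this hunk), assuming TestTask defaults its kind to 'test' as the
test expectations below suggest.

    from taskgraph.graph import Graph
    from taskgraph.taskgraph import TaskGraph
    from taskgraph.test.util import TestTask

    tasks = {'a': TestTask(label='a'), 'b': TestTask(label='b')}
    tg = TaskGraph(tasks, Graph({'a', 'b'}, {('a', 'b', 'dep')}))
    as_json = tg.to_json()
    # as_json['a']['dependencies'] == {'dep': 'b'}
    tasks2, tg2 = TaskGraph.from_json(as_json)
    assert tg2 == tg
]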
diff --git a/taskcluster/taskgraph/test/__init__.py b/taskcluster/taskgraph/test/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/test/__init__.py
diff --git a/taskcluster/taskgraph/test/automationrelevance.json b/taskcluster/taskgraph/test/automationrelevance.json
new file mode 100644
index 000000000..8adfc446d
--- /dev/null
+++ b/taskcluster/taskgraph/test/automationrelevance.json
@@ -0,0 +1,425 @@
+{
+ "changesets": [
+ {
+ "author": "James Long <longster@gmail.com>",
+ "backsoutnodes": [],
+ "bugs": [
+ {
+ "no": "1300866",
+ "url": "https://bugzilla.mozilla.org/show_bug.cgi?id=1300866"
+ }
+ ],
+ "date": [
+ 1473196655.0,
+ 14400
+ ],
+ "desc": "Bug 1300866 - expose devtools require to new debugger r=jlast,bgrins",
+ "extra": {
+ "branch": "default"
+ },
+ "files": [
+ "devtools/client/debugger/new/index.html"
+ ],
+ "node": "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "parents": [
+ "37c9349b4e8167a61b08b7e119c21ea177b98942"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312890,
+ "reviewers": [
+ {
+ "name": "jlast",
+ "revset": "reviewer(jlast)"
+ },
+ {
+ "name": "bgrins",
+ "revset": "reviewer(bgrins)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Wes Kocher <wkocher@mozilla.com>",
+ "backsoutnodes": [],
+ "bugs": [],
+ "date": [
+ 1473208638.0,
+ 25200
+ ],
+ "desc": "Merge m-c to fx-team, a=merge",
+ "extra": {
+ "branch": "default"
+ },
+ "files": [
+ "taskcluster/scripts/builder/build-l10n.sh"
+ ],
+ "node": "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "parents": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "91c2b9d5c1354ca79e5b174591dbb03b32b15bbf"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312891,
+ "reviewers": [
+ {
+ "name": "merge",
+ "revset": "reviewer(merge)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Towkir Ahmed <towkir17@gmail.com>",
+ "backsoutnodes": [],
+ "bugs": [
+ {
+ "no": "1296648",
+ "url": "https://bugzilla.mozilla.org/show_bug.cgi?id=1296648"
+ }
+ ],
+ "date": [
+ 1472957580.0,
+ 14400
+ ],
+ "desc": "Bug 1296648 - Fix direction of .ruleview-expander.theme-twisty in RTL locales. r=ntim",
+ "extra": {
+ "branch": "default"
+ },
+ "files": [
+ "devtools/client/themes/rules.css"
+ ],
+ "node": "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "parents": [
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312892,
+ "reviewers": [
+ {
+ "name": "ntim",
+ "revset": "reviewer(ntim)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Oriol <oriol-bugzilla@hotmail.com>",
+ "backsoutnodes": [],
+ "bugs": [
+ {
+ "no": "1300336",
+ "url": "https://bugzilla.mozilla.org/show_bug.cgi?id=1300336"
+ }
+ ],
+ "date": [
+ 1472921160.0,
+ 14400
+ ],
+ "desc": "Bug 1300336 - Allow pseudo-arrays to have a length property. r=fitzgen",
+ "extra": {
+ "branch": "default"
+ },
+ "files": [
+ "devtools/client/webconsole/test/browser_webconsole_output_06.js",
+ "devtools/server/actors/object.js"
+ ],
+ "node": "99c542fa43a72ee863c813b5624048d1b443549b",
+ "parents": [
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312893,
+ "reviewers": [
+ {
+ "name": "fitzgen",
+ "revset": "reviewer(fitzgen)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Ruturaj Vartak <ruturaj@gmail.com>",
+ "backsoutnodes": [],
+ "bugs": [
+ {
+ "no": "1295010",
+ "url": "https://bugzilla.mozilla.org/show_bug.cgi?id=1295010"
+ }
+ ],
+ "date": [
+ 1472854020.0,
+ -7200
+ ],
+ "desc": "Bug 1295010 - Don't move the eyedropper to the out of browser window by keyboard navigation. r=pbro\n\nMozReview-Commit-ID: vBwmSxVNXK",
+ "extra": {
+ "amend_source": "6885024ef00cfa33d73c59dc03c48ebcda9ccbdd",
+ "branch": "default",
+ "histedit_source": "c43167f0a7cbe9f4c733b15da726e5150a9529ba",
+ "rebase_source": "b74df421630fc46dab6b6cc026bf3e0ae6b4a651"
+ },
+ "files": [
+ "devtools/client/inspector/test/browser_inspector_highlighter-eyedropper-events.js",
+ "devtools/client/inspector/test/head.js",
+ "devtools/server/actors/highlighters/eye-dropper.js"
+ ],
+ "node": "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "parents": [
+ "99c542fa43a72ee863c813b5624048d1b443549b"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312894,
+ "reviewers": [
+ {
+ "name": "pbro",
+ "revset": "reviewer(pbro)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Matteo Ferretti <mferretti@mozilla.com>",
+ "backsoutnodes": [],
+ "bugs": [
+ {
+ "no": "1299154",
+ "url": "https://bugzilla.mozilla.org/show_bug.cgi?id=1299154"
+ }
+ ],
+ "date": [
+ 1472629906.0,
+ -7200
+ ],
+ "desc": "Bug 1299154 - added Set/GetOverrideDPPX to restorefromHistory; r=mstange\n\nMozReview-Commit-ID: AsyAcG3Igbn\n",
+ "extra": {
+ "branch": "default",
+ "committer": "Matteo Ferretti <mferretti@mozilla.com> 1473236511 -7200"
+ },
+ "files": [
+ "docshell/base/nsDocShell.cpp",
+ "dom/tests/mochitest/general/test_contentViewer_overrideDPPX.html"
+ ],
+ "node": "541c9086c0f27fba60beecc9bc94543103895c86",
+ "parents": [
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312895,
+ "reviewers": [
+ {
+ "name": "mstange",
+ "revset": "reviewer(mstange)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Patrick Brosset <pbrosset@mozilla.com>",
+ "backsoutnodes": [],
+ "bugs": [
+ {
+ "no": "1295010",
+ "url": "https://bugzilla.mozilla.org/show_bug.cgi?id=1295010"
+ }
+ ],
+ "date": [
+ 1473239449.0,
+ -7200
+ ],
+ "desc": "Bug 1295010 - Removed testActor from highlighterHelper in inspector tests; r=me\n\nMozReview-Commit-ID: GMksl81iGcp",
+ "extra": {
+ "branch": "default"
+ },
+ "files": [
+ "devtools/client/inspector/test/browser_inspector_highlighter-eyedropper-events.js",
+ "devtools/client/inspector/test/head.js"
+ ],
+ "node": "041a925171e431bf51fb50193ab19d156088c89a",
+ "parents": [
+ "541c9086c0f27fba60beecc9bc94543103895c86"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312896,
+ "reviewers": [
+ {
+ "name": "me",
+ "revset": "reviewer(me)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ },
+ {
+ "author": "Carsten \"Tomcat\" Book <cbook@mozilla.com>",
+ "backsoutnodes": [],
+ "bugs": [],
+ "date": [
+ 1473261233.0,
+ -7200
+ ],
+ "desc": "merge fx-team to mozilla-central a=merge",
+ "extra": {
+ "branch": "default"
+ },
+ "files": [],
+ "node": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "parents": [
+ "3d0b41fdd93bd8233745eadb4e0358e385bf2cb9",
+ "041a925171e431bf51fb50193ab19d156088c89a"
+ ],
+ "perfherderurl": "https://treeherder.mozilla.org/perf.html#/compare?originalProject=mozilla-central&originalRevision=a14f88a9af7a59e677478694bafd9375ac53683e&newProject=mozilla-central&newRevision=ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "pushdate": [
+ 1473261248,
+ 0
+ ],
+ "pushhead": "a14f88a9af7a59e677478694bafd9375ac53683e",
+ "pushid": 30664,
+ "pushnodes": [
+ "ae2144aa4356b65c2f8c0de8c9082dcb7e330e24",
+ "73a6a267a50a0e1c41e689b265ad3eebe43d7ac6",
+ "16a1a91f9269ab95dd83eb29dc5d0227665f7d94",
+ "99c542fa43a72ee863c813b5624048d1b443549b",
+ "a6b6a93eb41a05e310a11f0172f01ba9b21d3eac",
+ "541c9086c0f27fba60beecc9bc94543103895c86",
+ "041a925171e431bf51fb50193ab19d156088c89a",
+ "a14f88a9af7a59e677478694bafd9375ac53683e"
+ ],
+ "pushuser": "cbook@mozilla.com",
+ "rev": 312897,
+ "reviewers": [
+ {
+ "name": "merge",
+ "revset": "reviewer(merge)"
+ }
+ ],
+ "treeherderrepo": "mozilla-central",
+ "treeherderrepourl": "https://treeherder.mozilla.org/#/jobs?repo=mozilla-central"
+ }
+ ],
+ "visible": true
+}
+
diff --git a/taskcluster/taskgraph/test/test_create.py b/taskcluster/taskgraph/test/test_create.py
new file mode 100644
index 000000000..b8da3aec0
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_create.py
@@ -0,0 +1,76 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+import os
+
+from .. import create
+from ..graph import Graph
+from ..taskgraph import TaskGraph
+from .util import TestTask
+
+from mozunit import main
+
+
+class TestCreate(unittest.TestCase):
+
+ def setUp(self):
+ self.old_task_id = os.environ.get('TASK_ID')
+ if 'TASK_ID' in os.environ:
+ del os.environ['TASK_ID']
+ self.created_tasks = {}
+ self.old_create_task = create._create_task
+ create._create_task = self.fake_create_task
+
+ def tearDown(self):
+ create._create_task = self.old_create_task
+ if self.old_task_id:
+ os.environ['TASK_ID'] = self.old_task_id
+ elif 'TASK_ID' in os.environ:
+ del os.environ['TASK_ID']
+
+ def fake_create_task(self, session, task_id, label, task_def):
+ self.created_tasks[task_id] = task_def
+
+ def test_create_tasks(self):
+ tasks = {
+ 'tid-a': TestTask(label='a', task={'payload': 'hello world'}),
+ 'tid-b': TestTask(label='b', task={'payload': 'hello world'}),
+ }
+ label_to_taskid = {'a': 'tid-a', 'b': 'tid-b'}
+ graph = Graph(nodes={'tid-a', 'tid-b'}, edges={('tid-a', 'tid-b', 'edge')})
+ taskgraph = TaskGraph(tasks, graph)
+
+ create.create_tasks(taskgraph, label_to_taskid, {'level': '4'})
+
+ for tid, task in self.created_tasks.iteritems():
+ self.assertEqual(task['payload'], 'hello world')
+ self.assertEqual(task['schedulerId'], 'gecko-level-4')
+ # make sure the dependencies exist, at least
+ for depid in task.get('dependencies', []):
+                if depid == 'decisiontask':
+ # Don't look for decisiontask here
+ continue
+ self.assertIn(depid, self.created_tasks)
+
+ def test_create_task_without_dependencies(self):
+ "a task with no dependencies depends on the decision task"
+ os.environ['TASK_ID'] = 'decisiontask'
+ tasks = {
+ 'tid-a': TestTask(label='a', task={'payload': 'hello world'}),
+ }
+ label_to_taskid = {'a': 'tid-a'}
+ graph = Graph(nodes={'tid-a'}, edges=set())
+ taskgraph = TaskGraph(tasks, graph)
+
+ create.create_tasks(taskgraph, label_to_taskid, {'level': '4'})
+
+ for tid, task in self.created_tasks.iteritems():
+ self.assertEqual(task.get('dependencies'), [os.environ['TASK_ID']])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_decision.py b/taskcluster/taskgraph/test/test_decision.py
new file mode 100644
index 000000000..364965206
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_decision.py
@@ -0,0 +1,78 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+import json
+import yaml
+import shutil
+import unittest
+import tempfile
+
+from .. import decision
+from ..graph import Graph
+from ..taskgraph import TaskGraph
+from .util import TestTask
+from mozunit import main
+
+
+class TestDecision(unittest.TestCase):
+
+ def test_taskgraph_to_json(self):
+ tasks = {
+ 'a': TestTask(label='a', attributes={'attr': 'a-task'}),
+ 'b': TestTask(label='b', task={'task': 'def'}),
+ }
+ graph = Graph(nodes=set('ab'), edges={('a', 'b', 'edgelabel')})
+ taskgraph = TaskGraph(tasks, graph)
+
+ res = taskgraph.to_json()
+
+ self.assertEqual(res, {
+ 'a': {
+ 'label': 'a',
+ 'attributes': {'attr': 'a-task', 'kind': 'test'},
+ 'task': {},
+ 'dependencies': {'edgelabel': 'b'},
+ 'kind_implementation': 'taskgraph.test.util:TestTask',
+ },
+ 'b': {
+ 'label': 'b',
+ 'attributes': {'kind': 'test'},
+ 'task': {'task': 'def'},
+ 'dependencies': {},
+ 'kind_implementation': 'taskgraph.test.util:TestTask',
+ }
+ })
+
+ def test_write_artifact_json(self):
+ data = [{'some': 'data'}]
+ tmpdir = tempfile.mkdtemp()
+ try:
+ decision.ARTIFACTS_DIR = os.path.join(tmpdir, "artifacts")
+ decision.write_artifact("artifact.json", data)
+ with open(os.path.join(decision.ARTIFACTS_DIR, "artifact.json")) as f:
+ self.assertEqual(json.load(f), data)
+ finally:
+ if os.path.exists(tmpdir):
+ shutil.rmtree(tmpdir)
+ decision.ARTIFACTS_DIR = 'artifacts'
+
+ def test_write_artifact_yml(self):
+ data = [{'some': 'data'}]
+ tmpdir = tempfile.mkdtemp()
+ try:
+ decision.ARTIFACTS_DIR = os.path.join(tmpdir, "artifacts")
+ decision.write_artifact("artifact.yml", data)
+ with open(os.path.join(decision.ARTIFACTS_DIR, "artifact.yml")) as f:
+ self.assertEqual(yaml.safe_load(f), data)
+ finally:
+ if os.path.exists(tmpdir):
+ shutil.rmtree(tmpdir)
+ decision.ARTIFACTS_DIR = 'artifacts'
+
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_files_changed.py b/taskcluster/taskgraph/test/test_files_changed.py
new file mode 100644
index 000000000..0e3366b3c
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_files_changed.py
@@ -0,0 +1,73 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import json
+import os
+import unittest
+
+from .. import files_changed
+
+PARAMS = {
+ 'head_repository': 'https://hg.mozilla.org/mozilla-central',
+ 'head_rev': 'a14f88a9af7a',
+}
+
+FILES_CHANGED = [
+ 'devtools/client/debugger/new/index.html',
+ 'devtools/client/inspector/test/browser_inspector_highlighter-eyedropper-events.js',
+ 'devtools/client/inspector/test/head.js',
+ 'devtools/client/themes/rules.css',
+ 'devtools/client/webconsole/test/browser_webconsole_output_06.js',
+ 'devtools/server/actors/highlighters/eye-dropper.js',
+ 'devtools/server/actors/object.js',
+ 'docshell/base/nsDocShell.cpp',
+ 'dom/tests/mochitest/general/test_contentViewer_overrideDPPX.html',
+ 'taskcluster/scripts/builder/build-l10n.sh',
+]
+
+
+class FakeResponse:
+
+ def json(self):
+ with open(os.path.join(os.path.dirname(__file__), 'automationrelevance.json')) as f:
+ return json.load(f)
+
+
+class TestGetChangedFiles(unittest.TestCase):
+
+ def setUp(self):
+ files_changed._cache.clear()
+ self.old_get = files_changed.requests.get
+
+ def fake_get(url, **kwargs):
+ return FakeResponse()
+ files_changed.requests.get = fake_get
+
+ def tearDown(self):
+ files_changed.requests.get = self.old_get
+
+ def test_get_changed_files(self):
+ """Get_changed_files correctly gets the list of changed files in a push.
+ This tests against the production hg.mozilla.org so that it will detect
+ any changes in the format of the returned data."""
+ self.assertEqual(
+ sorted(files_changed.get_changed_files(PARAMS['head_repository'], PARAMS['head_rev'])),
+ FILES_CHANGED)
+
+
+class TestCheck(unittest.TestCase):
+
+ def setUp(self):
+ files_changed._cache[PARAMS['head_repository'], PARAMS['head_rev']] = FILES_CHANGED
+
+ def test_check_no_params(self):
+ self.assertTrue(files_changed.check({}, ["ignored"]))
+
+ def test_check_no_match(self):
+ self.assertFalse(files_changed.check(PARAMS, ["nosuch/**"]))
+
+ def test_check_match(self):
+ self.assertTrue(files_changed.check(PARAMS, ["devtools/**"]))
diff --git a/taskcluster/taskgraph/test/test_generator.py b/taskcluster/taskgraph/test/test_generator.py
new file mode 100644
index 000000000..f1b466e4d
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_generator.py
@@ -0,0 +1,129 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..generator import TaskGraphGenerator, Kind
+from .. import graph
+from ..task import base
+from mozunit import main
+
+
+class FakeTask(base.Task):
+
+ def __init__(self, **kwargs):
+ self.i = kwargs.pop('i')
+ super(FakeTask, self).__init__(**kwargs)
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, parameters, loaded_tasks):
+ return [cls(kind=kind,
+ label='{}-t-{}'.format(kind, i),
+ attributes={'tasknum': str(i)},
+ task={},
+ i=i)
+ for i in range(3)]
+
+ def get_dependencies(self, full_task_set):
+ i = self.i
+ if i > 0:
+ return [('{}-t-{}'.format(self.kind, i - 1), 'prev')]
+ else:
+ return []
+
+ def optimize(self, params):
+ return False, None
+
+
+class FakeKind(Kind):
+
+ def _get_impl_class(self):
+ return FakeTask
+
+ def load_tasks(self, parameters, loaded_tasks):
+ FakeKind.loaded_kinds.append(self.name)
+ return super(FakeKind, self).load_tasks(parameters, loaded_tasks)
+
+
+class WithFakeKind(TaskGraphGenerator):
+
+ def _load_kinds(self):
+ for kind_name, deps in self.parameters['kinds']:
+ yield FakeKind(
+ kind_name, '/fake',
+ {'kind-dependencies': deps} if deps else {})
+
+
+class TestGenerator(unittest.TestCase):
+
+ def maketgg(self, target_tasks=None, kinds=[('fake', [])]):
+ FakeKind.loaded_kinds = []
+ self.target_tasks = target_tasks or []
+
+ def target_tasks_method(full_task_graph, parameters):
+ return self.target_tasks
+ return WithFakeKind('/root', {'kinds': kinds}, target_tasks_method)
+
+ def test_kind_ordering(self):
+ "When task kinds depend on each other, they are loaded in postorder"
+ self.tgg = self.maketgg(kinds=[
+ ('fake3', ['fake2', 'fake1']),
+ ('fake2', ['fake1']),
+ ('fake1', []),
+ ])
+ self.tgg._run_until('full_task_set')
+ self.assertEqual(FakeKind.loaded_kinds, ['fake1', 'fake2', 'fake3'])
+
+ def test_full_task_set(self):
+ "The full_task_set property has all tasks"
+ self.tgg = self.maketgg()
+ self.assertEqual(self.tgg.full_task_set.graph,
+ graph.Graph({'fake-t-0', 'fake-t-1', 'fake-t-2'}, set()))
+ self.assertEqual(sorted(self.tgg.full_task_set.tasks.keys()),
+ sorted(['fake-t-0', 'fake-t-1', 'fake-t-2']))
+
+ def test_full_task_graph(self):
+ "The full_task_graph property has all tasks, and links"
+ self.tgg = self.maketgg()
+ self.assertEqual(self.tgg.full_task_graph.graph,
+ graph.Graph({'fake-t-0', 'fake-t-1', 'fake-t-2'},
+ {
+ ('fake-t-1', 'fake-t-0', 'prev'),
+ ('fake-t-2', 'fake-t-1', 'prev'),
+ }))
+ self.assertEqual(sorted(self.tgg.full_task_graph.tasks.keys()),
+ sorted(['fake-t-0', 'fake-t-1', 'fake-t-2']))
+
+ def test_target_task_set(self):
+ "The target_task_set property has the targeted tasks"
+ self.tgg = self.maketgg(['fake-t-1'])
+ self.assertEqual(self.tgg.target_task_set.graph,
+ graph.Graph({'fake-t-1'}, set()))
+ self.assertEqual(self.tgg.target_task_set.tasks.keys(),
+ ['fake-t-1'])
+
+ def test_target_task_graph(self):
+ "The target_task_graph property has the targeted tasks and deps"
+ self.tgg = self.maketgg(['fake-t-1'])
+ self.assertEqual(self.tgg.target_task_graph.graph,
+ graph.Graph({'fake-t-0', 'fake-t-1'},
+ {('fake-t-1', 'fake-t-0', 'prev')}))
+ self.assertEqual(sorted(self.tgg.target_task_graph.tasks.keys()),
+ sorted(['fake-t-0', 'fake-t-1']))
+
+ def test_optimized_task_graph(self):
+ "The optimized task graph contains task ids"
+ self.tgg = self.maketgg(['fake-t-2'])
+ tid = self.tgg.label_to_taskid
+ self.assertEqual(
+ self.tgg.optimized_task_graph.graph,
+ graph.Graph({tid['fake-t-0'], tid['fake-t-1'], tid['fake-t-2']}, {
+ (tid['fake-t-1'], tid['fake-t-0'], 'prev'),
+ (tid['fake-t-2'], tid['fake-t-1'], 'prev'),
+ }))
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_graph.py b/taskcluster/taskgraph/test/test_graph.py
new file mode 100644
index 000000000..5c4c950a7
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_graph.py
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..graph import Graph
+from mozunit import main
+
+
+class TestGraph(unittest.TestCase):
+
+ tree = Graph(set(['a', 'b', 'c', 'd', 'e', 'f', 'g']), {
+ ('a', 'b', 'L'),
+ ('a', 'c', 'L'),
+ ('b', 'd', 'K'),
+ ('b', 'e', 'K'),
+ ('c', 'f', 'N'),
+ ('c', 'g', 'N'),
+ })
+
+ linear = Graph(set(['1', '2', '3', '4']), {
+ ('1', '2', 'L'),
+ ('2', '3', 'L'),
+ ('3', '4', 'L'),
+ })
+
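+    # Each three-letter string in the 'diamonds' fixture below encodes one
+    # edge: 'AFL' is the edge A -> F with label 'L'.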
+ diamonds = Graph(set(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']),
+ set(tuple(x) for x in
+ 'AFL ADL BDL BEL CEL CHL DFL DGL EGL EHL FIL GIL GJL HJL'.split()
+ ))
+
+ multi_edges = Graph(set(['1', '2', '3', '4']), {
+ ('2', '1', 'red'),
+ ('2', '1', 'blue'),
+ ('3', '1', 'red'),
+ ('3', '2', 'blue'),
+ ('3', '2', 'green'),
+ ('4', '3', 'green'),
+ })
+
+ disjoint = Graph(set(['1', '2', '3', '4', 'α', 'β', 'γ']), {
+ ('2', '1', 'red'),
+ ('3', '1', 'red'),
+ ('3', '2', 'green'),
+ ('4', '3', 'green'),
+ ('α', 'β', 'πράσινο'),
+ ('β', 'γ', 'κόκκινο'),
+ ('α', 'γ', 'μπλε'),
+ })
+
+ def test_transitive_closure_empty(self):
+ "transitive closure of an empty set is an empty graph"
+ g = Graph(set(['a', 'b', 'c']), {('a', 'b', 'L'), ('a', 'c', 'L')})
+ self.assertEqual(g.transitive_closure(set()),
+ Graph(set(), set()))
+
+ def test_transitive_closure_disjoint(self):
+ "transitive closure of a disjoint set is a subset"
+ g = Graph(set(['a', 'b', 'c']), set())
+ self.assertEqual(g.transitive_closure(set(['a', 'c'])),
+ Graph(set(['a', 'c']), set()))
+
+ def test_transitive_closure_trees(self):
+ "transitive closure of a tree, at two non-root nodes, is the two subtrees"
+ self.assertEqual(self.tree.transitive_closure(set(['b', 'c'])),
+ Graph(set(['b', 'c', 'd', 'e', 'f', 'g']), {
+ ('b', 'd', 'K'),
+ ('b', 'e', 'K'),
+ ('c', 'f', 'N'),
+ ('c', 'g', 'N'),
+ }))
+
+ def test_transitive_closure_multi_edges(self):
+ "transitive closure of a tree with multiple edges between nodes keeps those edges"
+ self.assertEqual(self.multi_edges.transitive_closure(set(['3'])),
+ Graph(set(['1', '2', '3']), {
+ ('2', '1', 'red'),
+ ('2', '1', 'blue'),
+ ('3', '1', 'red'),
+ ('3', '2', 'blue'),
+ ('3', '2', 'green'),
+ }))
+
+ def test_transitive_closure_disjoint_edges(self):
+ "transitive closure of a disjoint graph keeps those edges"
+ self.assertEqual(self.disjoint.transitive_closure(set(['3', 'β'])),
+ Graph(set(['1', '2', '3', 'β', 'γ']), {
+ ('2', '1', 'red'),
+ ('3', '1', 'red'),
+ ('3', '2', 'green'),
+ ('β', 'γ', 'κόκκινο'),
+ }))
+
+ def test_transitive_closure_linear(self):
+ "transitive closure of a linear graph includes all nodes in the line"
+ self.assertEqual(self.linear.transitive_closure(set(['1'])), self.linear)
+
+ def test_visit_postorder_empty(self):
+ "postorder visit of an empty graph is empty"
+ self.assertEqual(list(Graph(set(), set()).visit_postorder()), [])
+
+    def assert_postorder(self, seq, graph):
+        # Postorder invariant: by the time a node is visited, every node it
+        # links to must already have been seen.
+        seen = set()
+        for e in seq:
+            for left, right, name in graph.edges:
+                if left == e:
+                    self.assertTrue(right in seen)
+            seen.add(e)
+        self.assertEqual(seen, graph.nodes)
+
+ def test_visit_postorder_tree(self):
+ "postorder visit of a tree satisfies invariant"
+        self.assert_postorder(self.tree.visit_postorder(), self.tree)
+
+ def test_visit_postorder_diamonds(self):
+ "postorder visit of a graph full of diamonds satisfies invariant"
+        self.assert_postorder(self.diamonds.visit_postorder(), self.diamonds)
+
+ def test_visit_postorder_multi_edges(self):
+ "postorder visit of a graph with duplicate edges satisfies invariant"
+        self.assert_postorder(self.multi_edges.visit_postorder(), self.multi_edges)
+
+ def test_visit_postorder_disjoint(self):
+ "postorder visit of a disjoint graph satisfies invariant"
+        self.assert_postorder(self.disjoint.visit_postorder(), self.disjoint)
+
+ def test_links_dict(self):
+ "link dict for a graph with multiple edges is correct"
+ self.assertEqual(self.multi_edges.links_dict(), {
+ '2': set(['1']),
+ '3': set(['1', '2']),
+ '4': set(['3']),
+ })
+
+ def test_named_links_dict(self):
+ "named link dict for a graph with multiple edges is correct"
+ self.assertEqual(self.multi_edges.named_links_dict(), {
+ '2': dict(red='1', blue='1'),
+ '3': dict(red='1', blue='2', green='2'),
+ '4': dict(green='3'),
+ })
+
+ def test_reverse_links_dict(self):
+ "reverse link dict for a graph with multiple edges is correct"
+ self.assertEqual(self.multi_edges.reverse_links_dict(), {
+ '1': set(['2', '3']),
+ '2': set(['3']),
+ '3': set(['4']),
+ })
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_optimize.py b/taskcluster/taskgraph/test/test_optimize.py
new file mode 100644
index 000000000..8d2ddf247
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_optimize.py
@@ -0,0 +1,256 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..optimize import optimize_task_graph, resolve_task_references
+from ..optimize import annotate_task_graph, get_subgraph
+from ..taskgraph import TaskGraph
+from .. import graph
+from .util import TestTask
+
+
+class TestResolveTaskReferences(unittest.TestCase):
+
+ def do(self, input, output):
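+        # Dependency names 'edge1'..'edge3' map to task ids 'tid1'..'tid3',
+        # so a '<edge1>' task-reference resolves to 'tid1'.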
+ taskid_for_edge_name = {'edge%d' % n: 'tid%d' % n for n in range(1, 4)}
+ self.assertEqual(resolve_task_references('subject', input, taskid_for_edge_name), output)
+
+ def test_in_list(self):
+ "resolve_task_references resolves task references in a list"
+ self.do({'in-a-list': ['stuff', {'task-reference': '<edge1>'}]},
+ {'in-a-list': ['stuff', 'tid1']})
+
+ def test_in_dict(self):
+ "resolve_task_references resolves task references in a dict"
+ self.do({'in-a-dict': {'stuff': {'task-reference': '<edge2>'}}},
+ {'in-a-dict': {'stuff': 'tid2'}})
+
+ def test_multiple(self):
+ "resolve_task_references resolves multiple references in the same string"
+ self.do({'multiple': {'task-reference': 'stuff <edge1> stuff <edge2> after'}},
+ {'multiple': 'stuff tid1 stuff tid2 after'})
+
+ def test_embedded(self):
+        "resolve_task_references resolves embedded references"
+ self.do({'embedded': {'task-reference': 'stuff before <edge3> stuff after'}},
+ {'embedded': 'stuff before tid3 stuff after'})
+
+ def test_escaping(self):
+ "resolve_task_references resolves escapes in task references"
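+        # '<<>' escapes a literal '<', so the whole input renders as '<tid3>'.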
+ self.do({'escape': {'task-reference': '<<><edge3>>'}},
+ {'escape': '<tid3>'})
+
+ def test_invalid(self):
+ "resolve_task_references raises a KeyError on reference to an invalid task"
+ self.assertRaisesRegexp(
+ KeyError,
+ "task 'subject' has no dependency named 'no-such'",
+ lambda: resolve_task_references('subject', {'task-reference': '<no-such>'}, {})
+ )
+
+
+class OptimizingTask(TestTask):
+    # the `optimize` method on this class is overridden directly in the tests
+ # below.
+ pass
+
+
+class TestOptimize(unittest.TestCase):
+
+ kind = None
+
+ def make_task(self, label, task_def=None, optimized=None, task_id=None):
+ task_def = task_def or {'sample': 'task-def'}
+ task = OptimizingTask(label=label, task=task_def)
+ task.optimized = optimized
+ task.task_id = task_id
+ return task
+
+ def make_graph(self, *tasks_and_edges):
+ tasks = {t.label: t for t in tasks_and_edges if isinstance(t, OptimizingTask)}
+ edges = {e for e in tasks_and_edges if not isinstance(e, OptimizingTask)}
+ return TaskGraph(tasks, graph.Graph(set(tasks), edges))
+
+ def assert_annotations(self, graph, **annotations):
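+        # Newly assigned task ids are 22-character slugids; normalize them to
+        # 'SLUGID' so assertions don't depend on random values.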
+ def repl(task_id):
+ return 'SLUGID' if task_id and len(task_id) == 22 else task_id
+ got_annotations = {
+ t.label: (t.optimized, repl(t.task_id)) for t in graph.tasks.itervalues()
+ }
+ self.assertEqual(got_annotations, annotations)
+
+ def test_annotate_task_graph_no_optimize(self):
+ "annotating marks everything as un-optimized if the kind returns that"
+ OptimizingTask.optimize = lambda self, params: (False, None)
+ graph = self.make_graph(
+ self.make_task('task1'),
+ self.make_task('task2'),
+ self.make_task('task3'),
+ ('task2', 'task1', 'build'),
+ ('task2', 'task3', 'image'),
+ )
+ annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
+ self.assert_annotations(
+ graph,
+ task1=(False, None),
+ task2=(False, None),
+ task3=(False, None)
+ )
+
+ def test_annotate_task_graph_taskid_without_optimize(self):
+ "raises exception if kind returns a taskid without optimizing"
+ OptimizingTask.optimize = lambda self, params: (False, 'some-taskid')
+ graph = self.make_graph(self.make_task('task1'))
+ self.assertRaises(
+ Exception,
+ lambda: annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
+ )
+
+ def test_annotate_task_graph_optimize_away_dependency(self):
+ "raises exception if kind optimizes away a task on which another depends"
+ OptimizingTask.optimize = \
+ lambda self, params: (True, None) if self.label == 'task1' else (False, None)
+ graph = self.make_graph(
+ self.make_task('task1'),
+ self.make_task('task2'),
+ ('task2', 'task1', 'build'),
+ )
+ self.assertRaises(
+ Exception,
+ lambda: annotate_task_graph(graph, {}, set(), graph.graph.named_links_dict(), {}, None)
+ )
+
+ def test_annotate_task_graph_do_not_optimize(self):
+ "annotating marks everything as un-optimized if in do_not_optimize"
+ OptimizingTask.optimize = lambda self, params: (True, 'taskid')
+ graph = self.make_graph(
+ self.make_task('task1'),
+ self.make_task('task2'),
+ ('task2', 'task1', 'build'),
+ )
+ label_to_taskid = {}
+ annotate_task_graph(graph, {}, {'task1', 'task2'},
+ graph.graph.named_links_dict(), label_to_taskid, None)
+ self.assert_annotations(
+ graph,
+ task1=(False, None),
+ task2=(False, None)
+ )
+
+ def test_annotate_task_graph_nos_do_not_propagate(self):
+ "a task with a non-optimized dependency can be optimized"
+ OptimizingTask.optimize = \
+ lambda self, params: (False, None) if self.label == 'task1' else (True, 'taskid')
+ graph = self.make_graph(
+ self.make_task('task1'),
+ self.make_task('task2'),
+ self.make_task('task3'),
+ ('task2', 'task1', 'build'),
+ ('task2', 'task3', 'image'),
+ )
+ annotate_task_graph(graph, {}, set(),
+ graph.graph.named_links_dict(), {}, None)
+ self.assert_annotations(
+ graph,
+ task1=(False, None),
+ task2=(True, 'taskid'),
+ task3=(True, 'taskid')
+ )
+
+ def test_get_subgraph_single_dep(self):
+ "when a single dependency is optimized, it is omitted from the graph"
+ graph = self.make_graph(
+ self.make_task('task1', optimized=True, task_id='dep1'),
+ self.make_task('task2', optimized=False),
+ self.make_task('task3', optimized=False),
+ ('task2', 'task1', 'build'),
+ ('task2', 'task3', 'image'),
+ )
+ label_to_taskid = {'task1': 'dep1'}
+ sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+ task2 = label_to_taskid['task2']
+ task3 = label_to_taskid['task3']
+ self.assertEqual(sub.graph.nodes, {task2, task3})
+ self.assertEqual(sub.graph.edges, {(task2, task3, 'image')})
+ self.assertEqual(sub.tasks[task2].task_id, task2)
+ self.assertEqual(sorted(sub.tasks[task2].task['dependencies']),
+ sorted([task3, 'dep1']))
+ self.assertEqual(sub.tasks[task3].task_id, task3)
+ self.assertEqual(sorted(sub.tasks[task3].task['dependencies']), [])
+
+ def test_get_subgraph_dep_chain(self):
+ "when a dependency chain is optimized, it is omitted from the graph"
+ graph = self.make_graph(
+ self.make_task('task1', optimized=True, task_id='dep1'),
+ self.make_task('task2', optimized=True, task_id='dep2'),
+ self.make_task('task3', optimized=False),
+ ('task2', 'task1', 'build'),
+ ('task3', 'task2', 'image'),
+ )
+ label_to_taskid = {'task1': 'dep1', 'task2': 'dep2'}
+ sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+ task3 = label_to_taskid['task3']
+ self.assertEqual(sub.graph.nodes, {task3})
+ self.assertEqual(sub.graph.edges, set())
+ self.assertEqual(sub.tasks[task3].task_id, task3)
+ self.assertEqual(sorted(sub.tasks[task3].task['dependencies']), ['dep2'])
+
+ def test_get_subgraph_opt_away(self):
+ "when a leaf task is optimized away, it is omitted from the graph"
+ graph = self.make_graph(
+ self.make_task('task1', optimized=False),
+ self.make_task('task2', optimized=True),
+ ('task2', 'task1', 'build'),
+ )
+ label_to_taskid = {'task2': 'dep2'}
+ sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+ task1 = label_to_taskid['task1']
+ self.assertEqual(sub.graph.nodes, {task1})
+ self.assertEqual(sub.graph.edges, set())
+ self.assertEqual(sub.tasks[task1].task_id, task1)
+ self.assertEqual(sorted(sub.tasks[task1].task['dependencies']), [])
+
+ def test_get_subgraph_refs_resolved(self):
+ "get_subgraph resolves task references"
+ graph = self.make_graph(
+ self.make_task('task1', optimized=True, task_id='dep1'),
+ self.make_task(
+ 'task2',
+ optimized=False,
+ task_def={'payload': {'task-reference': 'http://<build>/<test>'}}
+ ),
+ ('task2', 'task1', 'build'),
+ ('task2', 'task3', 'test'),
+ self.make_task('task3', optimized=False),
+ )
+ label_to_taskid = {'task1': 'dep1'}
+ sub = get_subgraph(graph, graph.graph.named_links_dict(), label_to_taskid)
+ task2 = label_to_taskid['task2']
+ task3 = label_to_taskid['task3']
+ self.assertEqual(sub.graph.nodes, {task2, task3})
+ self.assertEqual(sub.graph.edges, {(task2, task3, 'test')})
+ self.assertEqual(sub.tasks[task2].task_id, task2)
+ self.assertEqual(sorted(sub.tasks[task2].task['dependencies']), sorted([task3, 'dep1']))
+ self.assertEqual(sub.tasks[task2].task['payload'], 'http://dep1/' + task3)
+ self.assertEqual(sub.tasks[task3].task_id, task3)
+
+ def test_optimize(self):
+ "optimize_task_graph annotates and extracts the subgraph from a simple graph"
+ OptimizingTask.optimize = \
+ lambda self, params: (True, 'dep1') if self.label == 'task1' else (False, None)
+ input = self.make_graph(
+ self.make_task('task1'),
+ self.make_task('task2'),
+ self.make_task('task3'),
+ ('task2', 'task1', 'build'),
+ ('task2', 'task3', 'image'),
+ )
+ opt, label_to_taskid = optimize_task_graph(input, {}, set())
+ self.assertEqual(opt.graph, graph.Graph(
+ {label_to_taskid['task2'], label_to_taskid['task3']},
+ {(label_to_taskid['task2'], label_to_taskid['task3'], 'image')}))
diff --git a/taskcluster/taskgraph/test/test_parameters.py b/taskcluster/taskgraph/test/test_parameters.py
new file mode 100644
index 000000000..43d853d7b
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_parameters.py
@@ -0,0 +1,62 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..parameters import Parameters, load_parameters_file, PARAMETER_NAMES
+from mozunit import main, MockedOpen
+
+
+class TestParameters(unittest.TestCase):
+
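+    # A complete, valid parameter set: each parameter's value is its own name.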
+ vals = {n: n for n in PARAMETER_NAMES}
+
+ def test_Parameters_immutable(self):
+ p = Parameters(**self.vals)
+
+ def assign():
+ p['head_ref'] = 20
+ self.assertRaises(Exception, assign)
+
+ def test_Parameters_missing_KeyError(self):
+ p = Parameters(**self.vals)
+ self.assertRaises(KeyError, lambda: p['z'])
+
+ def test_Parameters_invalid_KeyError(self):
+ """even if the value is present, if it's not a valid property, raise KeyError"""
+ p = Parameters(xyz=10, **self.vals)
+ self.assertRaises(KeyError, lambda: p['xyz'])
+
+ def test_Parameters_get(self):
+ p = Parameters(head_ref=10, level=20)
+ self.assertEqual(p['head_ref'], 10)
+
+ def test_Parameters_check(self):
+ p = Parameters(**self.vals)
+ p.check() # should not raise
+
+ def test_Parameters_check_missing(self):
+ p = Parameters()
+ self.assertRaises(Exception, lambda: p.check())
+
+ def test_Parameters_check_extra(self):
+ p = Parameters(xyz=10, **self.vals)
+ self.assertRaises(Exception, lambda: p.check())
+
+ def test_load_parameters_file_yaml(self):
+ with MockedOpen({"params.yml": "some: data\n"}):
+ self.assertEqual(
+ load_parameters_file({'parameters': 'params.yml'}),
+ {'some': 'data'})
+
+ def test_load_parameters_file_json(self):
+ with MockedOpen({"params.json": '{"some": "data"}'}):
+ self.assertEqual(
+ load_parameters_file({'parameters': 'params.json'}),
+ {'some': 'data'})
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_target_tasks.py b/taskcluster/taskgraph/test/test_target_tasks.py
new file mode 100644
index 000000000..035ccefd8
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_target_tasks.py
@@ -0,0 +1,81 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from .. import target_tasks
+from .. import try_option_syntax
+from ..graph import Graph
+from ..taskgraph import TaskGraph
+from .util import TestTask
+from mozunit import main
+
+
+class FakeTryOptionSyntax(object):
+
+ def __init__(self, message, task_graph):
+ self.trigger_tests = 0
+ self.notifications = None
+
+ def task_matches(self, attributes):
+ return 'at-at' in attributes
+
+
+class TestTargetTasks(unittest.TestCase):
+
+ def default_matches(self, run_on_projects, project):
+ method = target_tasks.get_method('default')
+ graph = TaskGraph(tasks={
+ 'a': TestTask(kind='build', label='a',
+ attributes={'run_on_projects': run_on_projects}),
+ }, graph=Graph(nodes={'a'}, edges=set()))
+ parameters = {'project': project}
+ return 'a' in method(graph, parameters)
+
+ def test_default_all(self):
+ """run_on_projects=[all] includes release, integration, and other projects"""
+ self.assertTrue(self.default_matches(['all'], 'mozilla-central'))
+ self.assertTrue(self.default_matches(['all'], 'mozilla-inbound'))
+ self.assertTrue(self.default_matches(['all'], 'mozilla-aurora'))
+ self.assertTrue(self.default_matches(['all'], 'baobab'))
+
+ def test_default_integration(self):
+ """run_on_projects=[integration] includes integration projects"""
+ self.assertFalse(self.default_matches(['integration'], 'mozilla-central'))
+ self.assertTrue(self.default_matches(['integration'], 'mozilla-inbound'))
+ self.assertFalse(self.default_matches(['integration'], 'baobab'))
+
+    def test_default_release(self):
+ """run_on_projects=[release] includes release projects"""
+ self.assertTrue(self.default_matches(['release'], 'mozilla-central'))
+ self.assertFalse(self.default_matches(['release'], 'mozilla-inbound'))
+ self.assertFalse(self.default_matches(['release'], 'baobab'))
+
+ def test_default_nothing(self):
+ """run_on_projects=[] includes nothing"""
+ self.assertFalse(self.default_matches([], 'mozilla-central'))
+ self.assertFalse(self.default_matches([], 'mozilla-inbound'))
+ self.assertFalse(self.default_matches([], 'baobab'))
+
+ def test_try_option_syntax(self):
+ tasks = {
+ 'a': TestTask(kind=None, label='a'),
+ 'b': TestTask(kind=None, label='b', attributes={'at-at': 'yep'}),
+ }
+ graph = Graph(nodes=set('ab'), edges=set())
+ tg = TaskGraph(tasks, graph)
+ params = {'message': 'try me'}
+
+ orig_TryOptionSyntax = try_option_syntax.TryOptionSyntax
+ try:
+ try_option_syntax.TryOptionSyntax = FakeTryOptionSyntax
+ method = target_tasks.get_method('try_option_syntax')
+ self.assertEqual(method(tg, params), ['b'])
+ finally:
+ try_option_syntax.TryOptionSyntax = orig_TryOptionSyntax
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_task_docker_image.py b/taskcluster/taskgraph/test/test_task_docker_image.py
new file mode 100644
index 000000000..8f247db3e
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_task_docker_image.py
@@ -0,0 +1,35 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+import os
+
+from ..task import docker_image
+from mozunit import main
+
+
+KIND_PATH = os.path.join(docker_image.GECKO, 'taskcluster', 'ci', 'docker-image')
+
+
+class TestDockerImageKind(unittest.TestCase):
+
+ def setUp(self):
+ self.task = docker_image.DockerImageTask(
+ 'docker-image',
+ KIND_PATH,
+ {},
+ {},
+ index_paths=[])
+
+ def test_get_task_dependencies(self):
+ # this one's easy!
+ self.assertEqual(self.task.get_dependencies(None), [])
+
+ # TODO: optimize_task
+
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_taskgraph.py b/taskcluster/taskgraph/test/test_taskgraph.py
new file mode 100644
index 000000000..f8f09bce9
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_taskgraph.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..graph import Graph
+from ..task.docker_image import DockerImageTask
+from ..task.transform import TransformTask
+from ..taskgraph import TaskGraph
+from mozunit import main
+from taskgraph.util.docker import INDEX_PREFIX
+
+
+class TestTargetTasks(unittest.TestCase):
+
+ def test_from_json(self):
+ task = {
+ "routes": [],
+ "extra": {
+ "imageMeta": {
+ "contextHash": "<hash>",
+ "imageName": "<image>",
+ "level": "1"
+ }
+ }
+ }
+ index_paths = ["{}.level-{}.<image>.hash.<hash>".format(INDEX_PREFIX, level)
+ for level in range(1, 4)]
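+        # One index path per trust level (1-3); these are the routes the
+        # optimizer consults when looking for a prebuilt image.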
+ graph = TaskGraph(tasks={
+ 'a': TransformTask(
+ kind='fancy',
+ task={
+ 'label': 'a',
+ 'attributes': {},
+ 'dependencies': {},
+ 'when': {},
+ 'task': {'task': 'def'},
+ }),
+ 'b': DockerImageTask(kind='docker-image',
+ label='b',
+ attributes={},
+ task=task,
+ index_paths=index_paths),
+ }, graph=Graph(nodes={'a', 'b'}, edges=set()))
+
+ tasks, new_graph = TaskGraph.from_json(graph.to_json())
+ self.assertEqual(graph.tasks['a'], new_graph.tasks['a'])
+ self.assertEqual(graph, new_graph)
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_transforms_base.py b/taskcluster/taskgraph/test/test_transforms_base.py
new file mode 100644
index 000000000..0a0dfcaf2
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_transforms_base.py
@@ -0,0 +1,143 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+from mozunit import main
+from taskgraph.transforms.base import (
+ validate_schema,
+ get_keyed_by,
+ TransformSequence
+)
+from voluptuous import Schema
+
+schema = Schema({
+ 'x': int,
+ 'y': basestring,
+})
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def trans1(config, tests):
+ for test in tests:
+ test['one'] = 1
+ yield test
+
+
+@transforms.add
+def trans2(config, tests):
+ for test in tests:
+ test['two'] = 2
+ yield test
+
+
+class TestTransformSequence(unittest.TestCase):
+
+ def test_sequence(self):
+ tests = [{}, {'two': 1, 'second': True}]
+ res = list(transforms({}, tests))
+ self.assertEqual(res, [
+ {u'two': 2, u'one': 1},
+ {u'second': True, u'two': 2, u'one': 1},
+ ])
+
+
+class TestValidateSchema(unittest.TestCase):
+
+ def test_valid(self):
+ validate_schema(schema, {'x': 10, 'y': 'foo'}, "pfx")
+
+ def test_invalid(self):
+ try:
+ validate_schema(schema, {'x': 'not-int'}, "pfx")
+ self.fail("no exception raised")
+        except Exception as e:
+            self.assertTrue(str(e).startswith("pfx\n"))
+
+
+class TestKeyedBy(unittest.TestCase):
+
+ def test_simple_value(self):
+ test = {
+ 'test-name': 'tname',
+ 'option': 10,
+ }
+ self.assertEqual(get_keyed_by(test, 'option', 'x'), 10)
+
+ def test_by_value(self):
+ test = {
+ 'test-name': 'tname',
+ 'option': {
+ 'by-other-value': {
+ 'a': 10,
+ 'b': 20,
+ },
+ },
+ 'other-value': 'b',
+ }
+ self.assertEqual(get_keyed_by(test, 'option', 'x'), 20)
+
+ def test_by_value_regex(self):
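+        # Keys under 'by-test-platform' are matched as regular expressions,
+        # so 'macosx64/.*' wins for 'macosx64/debug' before 'default' applies.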
+ test = {
+ 'test-name': 'tname',
+ 'option': {
+ 'by-test-platform': {
+ 'macosx64/.*': 10,
+ 'linux64/debug': 20,
+ 'default': 5,
+ },
+ },
+ 'test-platform': 'macosx64/debug',
+ }
+ self.assertEqual(get_keyed_by(test, 'option', 'x'), 10)
+
+ def test_by_value_default(self):
+ test = {
+ 'test-name': 'tname',
+ 'option': {
+ 'by-other-value': {
+ 'a': 10,
+ 'default': 30,
+ },
+ },
+ 'other-value': 'xxx',
+ }
+ self.assertEqual(get_keyed_by(test, 'option', 'x'), 30)
+
+ def test_by_value_invalid_dict(self):
+ test = {
+ 'test-name': 'tname',
+ 'option': {
+ 'by-something-else': {},
+ 'by-other-value': {},
+ },
+ }
+ self.assertRaises(Exception, get_keyed_by, test, 'option', 'x')
+
+ def test_by_value_invalid_no_default(self):
+ test = {
+ 'test-name': 'tname',
+ 'option': {
+ 'by-other-value': {
+ 'a': 10,
+ },
+ },
+ 'other-value': 'b',
+ }
+ self.assertRaises(Exception, get_keyed_by, test, 'option', 'x')
+
+ def test_by_value_invalid_no_by(self):
+ test = {
+ 'test-name': 'tname',
+ 'option': {
+ 'other-value': {},
+ },
+ }
+ self.assertRaises(Exception, get_keyed_by, test, 'option', 'x')
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_try_option_syntax.py b/taskcluster/taskgraph/test/test_try_option_syntax.py
new file mode 100644
index 000000000..29aa2d5a9
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_try_option_syntax.py
@@ -0,0 +1,274 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+import itertools
+
+from ..try_option_syntax import TryOptionSyntax
+from ..try_option_syntax import RIDEALONG_BUILDS
+from ..graph import Graph
+from ..taskgraph import TaskGraph
+from .util import TestTask
+from mozunit import main
+
+# an empty graph, for things that don't look at it
+empty_graph = TaskGraph({}, Graph(set(), set()))
+
+
+def unittest_task(n, tp):
+ return (n, TestTask('test', n, {
+ 'unittest_try_name': n,
+ 'test_platform': tp,
+ }))
+
+
+def talos_task(n, tp):
+ return (n, TestTask('test', n, {
+ 'talos_try_name': n,
+ 'test_platform': tp,
+ }))
+
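+# Label -> task fixtures; TryOptionSyntax matches tasks by their
+# unittest_try_name / talos_try_name attributes.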
+tasks = {k: v for k, v in [
+ unittest_task('mochitest-browser-chrome', 'linux'),
+ unittest_task('mochitest-e10s-browser-chrome', 'linux64'),
+ unittest_task('mochitest-chrome', 'linux'),
+ unittest_task('mochitest-webgl', 'linux'),
+ unittest_task('crashtest-e10s', 'linux'),
+ unittest_task('gtest', 'linux64'),
+ talos_task('dromaeojs', 'linux64'),
+]}
+unittest_tasks = {k: v for k, v in tasks.iteritems()
+ if 'unittest_try_name' in v.attributes}
+talos_tasks = {k: v for k, v in tasks.iteritems()
+ if 'talos_try_name' in v.attributes}
+graph_with_jobs = TaskGraph(tasks, Graph(set(tasks), set()))
+
+
+class TestTryOptionSyntax(unittest.TestCase):
+
+ def test_empty_message(self):
+ "Given an empty message, it should return an empty value"
+ tos = TryOptionSyntax('', empty_graph)
+ self.assertEqual(tos.build_types, [])
+ self.assertEqual(tos.jobs, [])
+ self.assertEqual(tos.unittests, [])
+ self.assertEqual(tos.talos, [])
+ self.assertEqual(tos.platforms, [])
+
+ def test_message_without_try(self):
+ "Given a non-try message, it should return an empty value"
+        tos = TryOptionSyntax('Bug 1234: frobnicate the foo', empty_graph)
+ self.assertEqual(tos.build_types, [])
+ self.assertEqual(tos.jobs, [])
+ self.assertEqual(tos.unittests, [])
+ self.assertEqual(tos.talos, [])
+ self.assertEqual(tos.platforms, [])
+
+ def test_unknown_args(self):
+ "unknown arguments are ignored"
+ tos = TryOptionSyntax('try: --doubledash -z extra', empty_graph)
+        # equivalent to "try:"
+ self.assertEqual(tos.build_types, [])
+ self.assertEqual(tos.jobs, None)
+
+ def test_b_do(self):
+ "-b do should produce both build_types"
+ tos = TryOptionSyntax('try: -b do', empty_graph)
+ self.assertEqual(sorted(tos.build_types), ['debug', 'opt'])
+
+ def test_b_d(self):
+ "-b d should produce build_types=['debug']"
+ tos = TryOptionSyntax('try: -b d', empty_graph)
+ self.assertEqual(sorted(tos.build_types), ['debug'])
+
+ def test_b_o(self):
+ "-b o should produce build_types=['opt']"
+ tos = TryOptionSyntax('try: -b o', empty_graph)
+ self.assertEqual(sorted(tos.build_types), ['opt'])
+
+ def test_build_o(self):
+ "--build o should produce build_types=['opt']"
+ tos = TryOptionSyntax('try: --build o', empty_graph)
+ self.assertEqual(sorted(tos.build_types), ['opt'])
+
+ def test_b_dx(self):
+ "-b dx should produce build_types=['debug'], silently ignoring the x"
+ tos = TryOptionSyntax('try: -b dx', empty_graph)
+ self.assertEqual(sorted(tos.build_types), ['debug'])
+
+ def test_j_job(self):
+ "-j somejob sets jobs=['somejob']"
+ tos = TryOptionSyntax('try: -j somejob', empty_graph)
+ self.assertEqual(sorted(tos.jobs), ['somejob'])
+
+ def test_j_jobs(self):
+ "-j job1,job2 sets jobs=['job1', 'job2']"
+ tos = TryOptionSyntax('try: -j job1,job2', empty_graph)
+ self.assertEqual(sorted(tos.jobs), ['job1', 'job2'])
+
+ def test_j_all(self):
+ "-j all sets jobs=None"
+ tos = TryOptionSyntax('try: -j all', empty_graph)
+ self.assertEqual(tos.jobs, None)
+
+ def test_j_twice(self):
+ "-j job1 -j job2 sets jobs=job1, job2"
+ tos = TryOptionSyntax('try: -j job1 -j job2', empty_graph)
+ self.assertEqual(sorted(tos.jobs), sorted(['job1', 'job2']))
+
+ def test_p_all(self):
+ "-p all sets platforms=None"
+ tos = TryOptionSyntax('try: -p all', empty_graph)
+ self.assertEqual(tos.platforms, None)
+
+ def test_p_linux(self):
+ "-p linux sets platforms=['linux', 'linux-l10n']"
+ tos = TryOptionSyntax('try: -p linux', empty_graph)
+ self.assertEqual(tos.platforms, ['linux', 'linux-l10n'])
+
+ def test_p_linux_win32(self):
+ "-p linux,win32 sets platforms=['linux', 'linux-l10n', 'win32']"
+ tos = TryOptionSyntax('try: -p linux,win32', empty_graph)
+ self.assertEqual(sorted(tos.platforms), ['linux', 'linux-l10n', 'win32'])
+
+ def test_p_expands_ridealongs(self):
+ "-p linux,linux64 includes the RIDEALONG_BUILDS"
+ tos = TryOptionSyntax('try: -p linux,linux64', empty_graph)
+ ridealongs = list(task
+ for task in itertools.chain.from_iterable(
+ RIDEALONG_BUILDS.itervalues()
+ )
+ if 'android' not in task) # Don't include android-l10n
+ self.assertEqual(sorted(tos.platforms), sorted(['linux', 'linux64'] + ridealongs))
+
+ def test_u_none(self):
+ "-u none sets unittests=[]"
+ tos = TryOptionSyntax('try: -u none', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), [])
+
+ def test_u_all(self):
+ "-u all sets unittests=[..whole list..]"
+ tos = TryOptionSyntax('try: -u all', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([{'test': t} for t in unittest_tasks]))
+
+ def test_u_single(self):
+ "-u mochitest-webgl sets unittests=[mochitest-webgl]"
+ tos = TryOptionSyntax('try: -u mochitest-webgl', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([{'test': 'mochitest-webgl'}]))
+
+ def test_u_alias(self):
+ "-u mochitest-gl sets unittests=[mochitest-webgl]"
+ tos = TryOptionSyntax('try: -u mochitest-gl', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([{'test': 'mochitest-webgl'}]))
+
+ def test_u_multi_alias(self):
+ "-u e10s sets unittests=[all e10s unittests]"
+ tos = TryOptionSyntax('try: -u e10s', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': t} for t in unittest_tasks if 'e10s' in t
+ ]))
+
+ def test_u_commas(self):
+ "-u mochitest-webgl,gtest sets unittests=both"
+ tos = TryOptionSyntax('try: -u mochitest-webgl,gtest', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'mochitest-webgl'},
+ {'test': 'gtest'},
+ ]))
+
+ def test_u_chunks(self):
+ "-u gtest-3,gtest-4 selects the third and fourth chunk of gtest"
+ tos = TryOptionSyntax('try: -u gtest-3,gtest-4', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'only_chunks': set('34')},
+ ]))
+
+ def test_u_platform(self):
+ "-u gtest[linux] selects the linux platform for gtest"
+ tos = TryOptionSyntax('try: -u gtest[linux]', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'platforms': ['linux']},
+ ]))
+
+ def test_u_platforms(self):
+ "-u gtest[linux,win32] selects the linux and win32 platforms for gtest"
+ tos = TryOptionSyntax('try: -u gtest[linux,win32]', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'platforms': ['linux', 'win32']},
+ ]))
+
+ def test_u_platforms_pretty(self):
+ "-u gtest[Ubuntu] selects the linux, linux64 and linux64-asan platforms for gtest"
+ tos = TryOptionSyntax('try: -u gtest[Ubuntu]', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'platforms': ['linux', 'linux64', 'linux64-asan']},
+ ]))
+
+ def test_u_platforms_negated(self):
+ "-u gtest[-linux] selects all platforms but linux for gtest"
+ tos = TryOptionSyntax('try: -u gtest[-linux]', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'platforms': ['linux64']},
+ ]))
+
+ def test_u_platforms_negated_pretty(self):
+ "-u gtest[Ubuntu,-x64] selects just linux for gtest"
+ tos = TryOptionSyntax('try: -u gtest[Ubuntu,-x64]', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'platforms': ['linux']},
+ ]))
+
+ def test_u_chunks_platforms(self):
+ "-u gtest-1[linux,win32] selects the linux and win32 platforms for chunk 1 of gtest"
+ tos = TryOptionSyntax('try: -u gtest-1[linux,win32]', graph_with_jobs)
+ self.assertEqual(sorted(tos.unittests), sorted([
+ {'test': 'gtest', 'platforms': ['linux', 'win32'], 'only_chunks': set('1')},
+ ]))
+
+ def test_t_none(self):
+ "-t none sets talos=[]"
+ tos = TryOptionSyntax('try: -t none', graph_with_jobs)
+ self.assertEqual(sorted(tos.talos), [])
+
+ def test_t_all(self):
+ "-t all sets talos=[..whole list..]"
+ tos = TryOptionSyntax('try: -t all', graph_with_jobs)
+ self.assertEqual(sorted(tos.talos), sorted([{'test': t} for t in talos_tasks]))
+
+ def test_t_single(self):
+ "-t mochitest-webgl sets talos=[mochitest-webgl]"
+ tos = TryOptionSyntax('try: -t mochitest-webgl', graph_with_jobs)
+ self.assertEqual(sorted(tos.talos), sorted([{'test': 'mochitest-webgl'}]))
+
+ # -t shares an implementation with -u, so it's not tested heavily
+
+ def test_trigger_tests(self):
+ "--rebuild 10 sets trigger_tests"
+ tos = TryOptionSyntax('try: --rebuild 10', empty_graph)
+ self.assertEqual(tos.trigger_tests, 10)
+
+ def test_interactive(self):
+ "--interactive sets interactive"
+ tos = TryOptionSyntax('try: --interactive', empty_graph)
+ self.assertEqual(tos.interactive, True)
+
+ def test_all_email(self):
+ "--all-emails sets notifications"
+ tos = TryOptionSyntax('try: --all-emails', empty_graph)
+ self.assertEqual(tos.notifications, 'all')
+
+ def test_fail_email(self):
+ "--failure-emails sets notifications"
+ tos = TryOptionSyntax('try: --failure-emails', empty_graph)
+ self.assertEqual(tos.notifications, 'failure')
+
+ def test_no_email(self):
+ "no email settings don't set notifications"
+ tos = TryOptionSyntax('try:', empty_graph)
+ self.assertEqual(tos.notifications, None)
+
+if __name__ == '__main__':
+ main()
diff --git a/taskcluster/taskgraph/test/test_util_attributes.py b/taskcluster/taskgraph/test/test_util_attributes.py
new file mode 100644
index 000000000..c3575e917
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_attributes.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+from taskgraph.util.attributes import attrmatch
+
+
+class Attrmatch(unittest.TestCase):
+
+ def test_trivial_match(self):
+ """Given no conditions, anything matches"""
+ self.assertTrue(attrmatch({}))
+
+ def test_missing_attribute(self):
+ """If a filtering attribute is not present, no match"""
+ self.assertFalse(attrmatch({}, someattr=10))
+
+ def test_literal_attribute(self):
+ """Literal attributes must match exactly"""
+ self.assertTrue(attrmatch({'att': 10}, att=10))
+ self.assertFalse(attrmatch({'att': 10}, att=20))
+
+ def test_set_attribute(self):
+ """Set attributes require set membership"""
+ self.assertTrue(attrmatch({'att': 10}, att=set([9, 10])))
+ self.assertFalse(attrmatch({'att': 10}, att=set([19, 20])))
+
+ def test_callable_attribute(self):
+ """Callable attributes are called and any False causes the match to fail"""
+ self.assertTrue(attrmatch({'att': 10}, att=lambda val: True))
+ self.assertFalse(attrmatch({'att': 10}, att=lambda val: False))
+
+ def even(val):
+ return val % 2 == 0
+ self.assertTrue(attrmatch({'att': 10}, att=even))
+ self.assertFalse(attrmatch({'att': 11}, att=even))
+
+ def test_all_matches_required(self):
+ """If only one attribute does not match, the result is False"""
+ self.assertFalse(attrmatch({'a': 1}, a=1, b=2, c=3))
+ self.assertFalse(attrmatch({'a': 1, 'b': 2}, a=1, b=2, c=3))
+ self.assertTrue(attrmatch({'a': 1, 'b': 2, 'c': 3}, a=1, b=2, c=3))
diff --git a/taskcluster/taskgraph/test/test_util_docker.py b/taskcluster/taskgraph/test/test_util_docker.py
new file mode 100644
index 000000000..3bb7fe8f0
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_docker.py
@@ -0,0 +1,194 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+import shutil
+import stat
+import tarfile
+import tempfile
+import unittest
+
+from ..util import docker
+from mozunit import MockedOpen
+
+
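+# rw-r--r-- (0644): files are chmod'ed to a fixed mode so the context hashes
+# asserted below stay deterministic.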
+MODE_STANDARD = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+
+
+class TestDocker(unittest.TestCase):
+
+ def test_generate_context_hash(self):
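+        # Point docker.GECKO at a throwaway tree so the hash is computed over
+        # exactly the files created here.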
+ tmpdir = tempfile.mkdtemp()
+ old_GECKO = docker.GECKO
+ docker.GECKO = tmpdir
+ try:
+ os.makedirs(os.path.join(tmpdir, 'docker', 'my-image'))
+ p = os.path.join(tmpdir, 'docker', 'my-image', 'Dockerfile')
+ with open(p, 'w') as f:
+ f.write("FROM node\nADD a-file\n")
+ os.chmod(p, MODE_STANDARD)
+ p = os.path.join(tmpdir, 'docker', 'my-image', 'a-file')
+ with open(p, 'w') as f:
+ f.write("data\n")
+ os.chmod(p, MODE_STANDARD)
+ self.assertEqual(
+ docker.generate_context_hash(docker.GECKO,
+ os.path.join(docker.GECKO, 'docker/my-image'),
+ 'my-image'),
+ 'e61e675ce05e8c11424437db3f1004079374c1a5fe6ad6800346cebe137b0797'
+ )
+ finally:
+ docker.GECKO = old_GECKO
+ shutil.rmtree(tmpdir)
+
+ def test_docker_image_explicit_registry(self):
+ files = {}
+ files["{}/myimage/REGISTRY".format(docker.DOCKER_ROOT)] = "cool-images"
+ files["{}/myimage/VERSION".format(docker.DOCKER_ROOT)] = "1.2.3"
+ with MockedOpen(files):
+ self.assertEqual(docker.docker_image('myimage'), "cool-images/myimage:1.2.3")
+
+ def test_docker_image_default_registry(self):
+ files = {}
+ files["{}/REGISTRY".format(docker.DOCKER_ROOT)] = "mozilla"
+ files["{}/myimage/VERSION".format(docker.DOCKER_ROOT)] = "1.2.3"
+ with MockedOpen(files):
+ self.assertEqual(docker.docker_image('myimage'), "mozilla/myimage:1.2.3")
+
+ def test_create_context_tar_basic(self):
+ tmp = tempfile.mkdtemp()
+ try:
+ d = os.path.join(tmp, 'test_image')
+ os.mkdir(d)
+ with open(os.path.join(d, 'Dockerfile'), 'a'):
+ pass
+ os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)
+
+ with open(os.path.join(d, 'extra'), 'a'):
+ pass
+ os.chmod(os.path.join(d, 'extra'), MODE_STANDARD)
+
+ tp = os.path.join(tmp, 'tar')
+ h = docker.create_context_tar(tmp, d, tp, 'my_image')
+ self.assertEqual(h, '2a6d7f1627eba60daf85402418e041d728827d309143c6bc1c6bb3035bde6717')
+
+ # File prefix should be "my_image"
+ with tarfile.open(tp, 'r:gz') as tf:
+ self.assertEqual(tf.getnames(), [
+ 'my_image/Dockerfile',
+ 'my_image/extra',
+ ])
+ finally:
+ shutil.rmtree(tmp)
+
+ def test_create_context_topsrcdir_files(self):
+ tmp = tempfile.mkdtemp()
+ try:
+ d = os.path.join(tmp, 'test-image')
+ os.mkdir(d)
+ with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
+ fh.write(b'# %include extra/file0\n')
+ os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)
+
+ extra = os.path.join(tmp, 'extra')
+ os.mkdir(extra)
+ with open(os.path.join(extra, 'file0'), 'a'):
+ pass
+ os.chmod(os.path.join(extra, 'file0'), MODE_STANDARD)
+
+ tp = os.path.join(tmp, 'tar')
+ h = docker.create_context_tar(tmp, d, tp, 'test_image')
+ self.assertEqual(h, '20faeb7c134f21187b142b5fadba94ae58865dc929c6c293d8cbc0a087269338')
+
+ with tarfile.open(tp, 'r:gz') as tf:
+ self.assertEqual(tf.getnames(), [
+ 'test_image/Dockerfile',
+ 'test_image/topsrcdir/extra/file0',
+ ])
+ finally:
+ shutil.rmtree(tmp)
+
+ def test_create_context_absolute_path(self):
+ tmp = tempfile.mkdtemp()
+ try:
+ d = os.path.join(tmp, 'test-image')
+ os.mkdir(d)
+
+ # Absolute paths in %include syntax are not allowed.
+ with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
+ fh.write(b'# %include /etc/shadow\n')
+
+ with self.assertRaisesRegexp(Exception, 'cannot be absolute'):
+ docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
+ finally:
+ shutil.rmtree(tmp)
+
+ def test_create_context_outside_topsrcdir(self):
+ tmp = tempfile.mkdtemp()
+ try:
+ d = os.path.join(tmp, 'test-image')
+ os.mkdir(d)
+
+ with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
+ fh.write(b'# %include foo/../../../etc/shadow\n')
+
+ with self.assertRaisesRegexp(Exception, 'path outside topsrcdir'):
+ docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
+ finally:
+ shutil.rmtree(tmp)
+
+ def test_create_context_missing_extra(self):
+ tmp = tempfile.mkdtemp()
+ try:
+ d = os.path.join(tmp, 'test-image')
+ os.mkdir(d)
+
+ with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
+ fh.write(b'# %include does/not/exist\n')
+
+ with self.assertRaisesRegexp(Exception, 'path does not exist'):
+ docker.create_context_tar(tmp, d, os.path.join(tmp, 'tar'), 'test')
+ finally:
+ shutil.rmtree(tmp)
+
+ def test_create_context_extra_directory(self):
+ tmp = tempfile.mkdtemp()
+ try:
+ d = os.path.join(tmp, 'test-image')
+ os.mkdir(d)
+
+ with open(os.path.join(d, 'Dockerfile'), 'wb') as fh:
+ fh.write(b'# %include extra\n')
+ fh.write(b'# %include file0\n')
+ os.chmod(os.path.join(d, 'Dockerfile'), MODE_STANDARD)
+
+ extra = os.path.join(tmp, 'extra')
+ os.mkdir(extra)
+ for i in range(3):
+ p = os.path.join(extra, 'file%d' % i)
+ with open(p, 'wb') as fh:
+ fh.write(b'file%d' % i)
+ os.chmod(p, MODE_STANDARD)
+
+ with open(os.path.join(tmp, 'file0'), 'a'):
+ pass
+ os.chmod(os.path.join(tmp, 'file0'), MODE_STANDARD)
+
+ tp = os.path.join(tmp, 'tar')
+ h = docker.create_context_tar(tmp, d, tp, 'my_image')
+
+ self.assertEqual(h, 'e5440513ab46ae4c1d056269e1c6715d5da7d4bd673719d360411e35e5b87205')
+
+ with tarfile.open(tp, 'r:gz') as tf:
+ self.assertEqual(tf.getnames(), [
+ 'my_image/Dockerfile',
+ 'my_image/topsrcdir/extra/file0',
+ 'my_image/topsrcdir/extra/file1',
+ 'my_image/topsrcdir/extra/file2',
+ 'my_image/topsrcdir/file0',
+ ])
+ finally:
+ shutil.rmtree(tmp)
diff --git a/taskcluster/taskgraph/test/test_util_python_path.py b/taskcluster/taskgraph/test/test_util_python_path.py
new file mode 100644
index 000000000..9615d1347
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_python_path.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+from ..util import python_path
+
+
+class TestObject(object):
+
+ testClassProperty = object()
+
+
+class TestPythonPath(unittest.TestCase):
+
+ def test_find_object_no_such_module(self):
+ """find_object raises ImportError for a nonexistent module"""
+ self.assertRaises(ImportError, python_path.find_object, "no_such_module:someobj")
+
+ def test_find_object_no_such_object(self):
+ """find_object raises AttributeError for a nonexistent object"""
+ self.assertRaises(AttributeError, python_path.find_object,
+ "taskgraph.test.test_util_python_path:NoSuchObject")
+
+ def test_find_object_exists(self):
+ """find_object finds an existing object"""
+ obj = python_path.find_object(
+ "taskgraph.test.test_util_python_path:TestObject.testClassProperty")
+ self.assertIs(obj, TestObject.testClassProperty)
diff --git a/taskcluster/taskgraph/test/test_util_templates.py b/taskcluster/taskgraph/test/test_util_templates.py
new file mode 100755
index 000000000..47f7494a0
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_templates.py
@@ -0,0 +1,232 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+import mozunit
+import textwrap
+from taskgraph.util.templates import (
+ merge_to,
+ merge,
+ Templates,
+ TemplatesException
+)
+
+files = {}
+files['/fixtures/circular.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: 'circular_ref.yml'
+ variables:
+ woot: 'inherit'
+ """)
+
+files['/fixtures/inherit.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: 'templates.yml'
+ variables:
+ woot: 'inherit'
+ """)
+
+files['/fixtures/extend_child.yml'] = textwrap.dedent("""\
+ list: ['1', '2', '3']
+ was_list: ['1']
+ obj:
+ level: 1
+ deeper:
+ woot: 'bar'
+ list: ['baz']
+ """)
+
+files['/fixtures/circular_ref.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: 'circular.yml'
+ """)
+
+files['/fixtures/child_pass.yml'] = textwrap.dedent("""\
+ values:
+ - {{a}}
+ - {{b}}
+ - {{c}}
+ """)
+
+files['/fixtures/inherit_pass.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: 'child_pass.yml'
+ variables:
+ a: 'a'
+ b: 'b'
+ c: 'c'
+ """)
+
+files['/fixtures/deep/2.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: deep/1.yml
+
+ """)
+
+files['/fixtures/deep/3.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: deep/2.yml
+
+ """)
+
+files['/fixtures/deep/4.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: deep/3.yml
+ """)
+
+files['/fixtures/deep/1.yml'] = textwrap.dedent("""\
+ variable: {{value}}
+ """)
+
+files['/fixtures/simple.yml'] = textwrap.dedent("""\
+ is_simple: true
+ """)
+
+files['/fixtures/templates.yml'] = textwrap.dedent("""\
+ content: 'content'
+ variable: '{{woot}}'
+ """)
+
+files['/fixtures/extend_parent.yml'] = textwrap.dedent("""\
+ $inherits:
+ from: 'extend_child.yml'
+
+ list: ['4']
+ was_list:
+ replaced: true
+ obj:
+ level: 2
+ from_parent: true
+ deeper:
+ list: ['bar']
+ """)
+
+
+class TemplatesTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mocked_open = mozunit.MockedOpen(files)
+ self.mocked_open.__enter__()
+ self.subject = Templates('/fixtures')
+
+ def tearDown(self):
+ self.mocked_open.__exit__(None, None, None)
+
+ def test_invalid_path(self):
+ with self.assertRaisesRegexp(TemplatesException, 'must be a directory'):
+ Templates('/zomg/not/a/dir')
+
+ def test_no_templates(self):
+ content = self.subject.load('simple.yml', {})
+        self.assertEqual(content, {
+ 'is_simple': True
+ })
+
+ def test_with_templates(self):
+ content = self.subject.load('templates.yml', {
+ 'woot': 'bar'
+ })
+
+        self.assertEqual(content, {
+ 'content': 'content',
+ 'variable': 'bar'
+ })
+
+ def test_inheritance(self):
+ '''
+ The simple single pass inheritance case.
+ '''
+ content = self.subject.load('inherit.yml', {})
+ self.assertEqual(content, {
+ 'content': 'content',
+ 'variable': 'inherit'
+ })
+
+    def test_inheritance_implicit_pass(self):
+ '''
+ Implicitly pass parameters from the child to the ancestor.
+ '''
+ content = self.subject.load('inherit_pass.yml', {
+            'a': 'overridden'
+ })
+
+        self.assertEqual(content, {'values': ['overridden', 'b', 'c']})
+
+ def test_inheritance_circular(self):
+ '''
+ Circular reference handling.
+ '''
+ with self.assertRaisesRegexp(TemplatesException, 'circular'):
+ self.subject.load('circular.yml', {})
+
+ def test_deep_inheritance(self):
+ content = self.subject.load('deep/4.yml', {
+ 'value': 'myvalue'
+ })
+ self.assertEqual(content, {'variable': 'myvalue'})
+
+ def test_inheritance_with_simple_extensions(self):
+ content = self.subject.load('extend_parent.yml', {})
+        self.assertEqual(content, {
+ 'list': ['1', '2', '3', '4'],
+ 'obj': {
+ 'from_parent': True,
+ 'deeper': {
+ 'woot': 'bar',
+ 'list': ['baz', 'bar']
+ },
+ 'level': 2,
+ },
+ 'was_list': {'replaced': True}
+ })
+
+
+class MergeTest(unittest.TestCase):
+
+ def test_merge_to_dicts(self):
+ source = {'a': 1, 'b': 2}
+ dest = {'b': '20', 'c': 30}
+ expected = {
+ 'a': 1, # source only
+ 'b': 2, # source overrides dest
+ 'c': 30, # dest only
+ }
+ self.assertEqual(merge_to(source, dest), expected)
+ self.assertEqual(dest, expected)
+
+ def test_merge_to_lists(self):
+ source = {'x': [3, 4]}
+ dest = {'x': [1, 2]}
+ expected = {'x': [1, 2, 3, 4]} # dest first
+ self.assertEqual(merge_to(source, dest), expected)
+ self.assertEqual(dest, expected)
+
+ def test_merge_diff_types(self):
+ source = {'x': [1, 2]}
+ dest = {'x': 'abc'}
+ expected = {'x': [1, 2]} # source wins
+ self.assertEqual(merge_to(source, dest), expected)
+ self.assertEqual(dest, expected)
+
+ def test_merge(self):
+ first = {'a': 1, 'b': 2, 'd': 11}
+ second = {'b': 20, 'c': 30}
+ third = {'c': 300, 'd': 400}
+ expected = {
+ 'a': 1,
+ 'b': 20,
+ 'c': 300,
+ 'd': 400,
+ }
+ self.assertEqual(merge(first, second, third), expected)
+
+        # inputs haven't changed.
+ self.assertEqual(first, {'a': 1, 'b': 2, 'd': 11})
+ self.assertEqual(second, {'b': 20, 'c': 30})
+ self.assertEqual(third, {'c': 300, 'd': 400})
+
+if __name__ == '__main__':
+ mozunit.main()
diff --git a/taskcluster/taskgraph/test/test_util_time.py b/taskcluster/taskgraph/test/test_util_time.py
new file mode 100755
index 000000000..f001c9d9c
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_time.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+import mozunit
+from datetime import datetime
+from taskgraph.util.time import (
+ InvalidString,
+ UnknownTimeMeasurement,
+ value_of,
+ json_time_from_now
+)
+
+
+class FromNowTest(unittest.TestCase):
+
+ def test_invalid_str(self):
+ with self.assertRaises(InvalidString):
+ value_of('wtfs')
+
+ def test_missing_unit(self):
+ with self.assertRaises(InvalidString):
+ value_of('1')
+
+ def test_missing_unknown_unit(self):
+ with self.assertRaises(UnknownTimeMeasurement):
+ value_of('1z')
+
+ def test_value_of(self):
+ self.assertEqual(value_of('1s').total_seconds(), 1)
+ self.assertEqual(value_of('1 second').total_seconds(), 1)
+ self.assertEqual(value_of('1min').total_seconds(), 60)
+ self.assertEqual(value_of('1h').total_seconds(), 3600)
+ self.assertEqual(value_of('1d').total_seconds(), 86400)
+ self.assertEqual(value_of('1mo').total_seconds(), 2592000)
+ self.assertEqual(value_of('1 month').total_seconds(), 2592000)
+ self.assertEqual(value_of('1y').total_seconds(), 31536000)
+
+ with self.assertRaises(UnknownTimeMeasurement):
+ value_of('1m').total_seconds() # ambiguous between minute and month
+
+ def test_json_from_now_utc_now(self):
+ # Just here to ensure we don't raise.
+ json_time_from_now('1 years')
+
+ def test_json_from_now(self):
+ now = datetime(2014, 1, 1)
+ self.assertEqual(json_time_from_now('1 years', now),
+ '2015-01-01T00:00:00Z')
+ self.assertEqual(json_time_from_now('6 days', now),
+ '2014-01-07T00:00:00Z')
+
+if __name__ == '__main__':
+ mozunit.main()
diff --git a/taskcluster/taskgraph/test/test_util_treeherder.py b/taskcluster/taskgraph/test/test_util_treeherder.py
new file mode 100644
index 000000000..cf7513c00
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_treeherder.py
@@ -0,0 +1,23 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+from taskgraph.util.treeherder import split_symbol, join_symbol
+
+
+class TestSymbols(unittest.TestCase):
+
+ def test_split_no_group(self):
+ self.assertEqual(split_symbol('xy'), ('?', 'xy'))
+
+ def test_split_with_group(self):
+ self.assertEqual(split_symbol('ab(xy)'), ('ab', 'xy'))
+
+ def test_join_no_group(self):
+ self.assertEqual(join_symbol('?', 'xy'), 'xy')
+
+ def test_join_with_group(self):
+ self.assertEqual(join_symbol('ab', 'xy'), 'ab(xy)')
diff --git a/taskcluster/taskgraph/test/test_util_yaml.py b/taskcluster/taskgraph/test/test_util_yaml.py
new file mode 100644
index 000000000..d4ff410db
--- /dev/null
+++ b/taskcluster/taskgraph/test/test_util_yaml.py
@@ -0,0 +1,23 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import unittest
+
+from ..util import yaml
+from mozunit import MockedOpen
+
+FOO_YML = """\
+prop:
+ - val1
+"""
+
+
+class TestYaml(unittest.TestCase):
+
+ def test_load(self):
+ with MockedOpen({'/dir1/dir2/foo.yml': FOO_YML}):
+ self.assertEqual(yaml.load_yaml("/dir1/dir2", "foo.yml"),
+ {'prop': ['val1']})
diff --git a/taskcluster/taskgraph/test/util.py b/taskcluster/taskgraph/test/util.py
new file mode 100644
index 000000000..cf9a49ad3
--- /dev/null
+++ b/taskcluster/taskgraph/test/util.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from ..task import base
+
+
+class TestTask(base.Task):
+
+ def __init__(self, kind=None, label=None, attributes=None, task=None):
+ super(TestTask, self).__init__(
+ kind or 'test',
+ label or 'test-label',
+ attributes or {},
+ task or {})
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, parameters):
+ return []
+
+ def get_dependencies(self, taskgraph):
+ return []
diff --git a/taskcluster/taskgraph/transforms/__init__.py b/taskcluster/taskgraph/transforms/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/__init__.py
diff --git a/taskcluster/taskgraph/transforms/android_stuff.py b/taskcluster/taskgraph/transforms/android_stuff.py
new file mode 100644
index 000000000..cb1e0fa5b
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/android_stuff.py
@@ -0,0 +1,46 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Set dynamic task description properties of the android stuff. Temporary!
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def setup_task(config, tasks):
+ for task in tasks:
+ task['label'] = task['name']
+ env = task['worker'].setdefault('env', {})
+ env.update({
+ 'GECKO_BASE_REPOSITORY': config.params['base_repository'],
+ 'GECKO_HEAD_REF': config.params['head_rev'],
+ 'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
+ 'GECKO_HEAD_REV': config.params['head_rev'],
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ 'MH_BRANCH': config.params['project'],
+ })
+
+ task['worker'].setdefault('caches', []).append({
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-tc-vcs'.format(
+ config.params['level'], config.params['project']),
+ 'mount-point': "/home/worker/.tc-vcs",
+ })
+
+ if int(config.params['level']) > 1:
+ task['worker'].setdefault('caches', []).append({
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-build-{}-workspace'.format(
+ config.params['level'], config.params['project'], task['name']),
+ 'mount-point': "/home/worker/workspace",
+ })
+
+ del task['name']
+ yield task
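+
+# For a level-3 push of project 'mozilla-central' (illustrative values), the
+# caches above are named 'level-3-mozilla-central-tc-vcs' and
+# 'level-3-mozilla-central-build-<name>-workspace'.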
diff --git a/taskcluster/taskgraph/transforms/base.py b/taskcluster/taskgraph/transforms/base.py
new file mode 100644
index 000000000..aab139252
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/base.py
@@ -0,0 +1,126 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import re
+import pprint
+import voluptuous
+
+
+class TransformConfig(object):
+ """A container for configuration affecting transforms. The `config`
+ argument to transforms is an instance of this class, possibly with
+ additional kind-specific attributes beyond those set here."""
+ def __init__(self, kind, path, config, params):
+ # the name of the current kind
+ self.kind = kind
+
+ # the path to the kind configuration directory
+ self.path = path
+
+ # the parsed contents of kind.yml
+ self.config = config
+
+ # the parameters for this task-graph generation run
+ self.params = params
+
+
+class TransformSequence(object):
+ """
+ Container for a sequence of transforms. Each transform is represented as a
+ callable taking (config, items) and returning a generator which will yield
+ transformed items. The resulting sequence has the same interface.
+
+ This is convenient to use in a file full of transforms, as it provides a
+ decorator, @transforms.add, that will add the decorated function to the
+ sequence.
+ """
+
+ def __init__(self, transforms=None):
+ self.transforms = transforms or []
+
+ def __call__(self, config, items):
+ for xform in self.transforms:
+ items = xform(config, items)
+ if items is None:
+ raise Exception("Transform {} is not a generator".format(xform))
+ return items
+
+ def __repr__(self):
+ return '\n'.join(
+ ['TransformSequence(['] +
+ [repr(x) for x in self.transforms] +
+ ['])'])
+
+ def add(self, func):
+ self.transforms.append(func)
+ return func
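+
+# A minimal usage sketch (the `rename` transform is hypothetical, for
+# illustration only); transforms run in the order they were added:
+#
+#   transforms = TransformSequence()
+#
+#   @transforms.add
+#   def rename(config, items):
+#       for item in items:
+#           item['label'] = 'renamed-' + item['label']
+#           yield item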
+
+
+def validate_schema(schema, obj, msg_prefix):
+ """
+ Validate that object satisfies schema. If not, generate a useful exception
+ beginning with msg_prefix.
+ """
+ try:
+ return schema(obj)
+ except voluptuous.MultipleInvalid as exc:
+ msg = [msg_prefix]
+ for error in exc.errors:
+ msg.append(str(error))
+ raise Exception('\n'.join(msg) + '\n' + pprint.pformat(obj))
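+
+# e.g. (illustrative) validate_schema(Schema({'a': int}), {'a': 'x'},
+# 'In my-task:') raises an Exception whose message starts with 'In my-task:',
+# followed by the voluptuous errors and a pretty-printed dump of the object.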
+
+
+def get_keyed_by(item, field, item_name, subfield=None):
+ """
+ For values which can either accept a literal value, or be keyed by some
+ other attribute of the item, perform that lookup. For example, this supports
+
+ chunks:
+ by-test-platform:
+ macosx-10.11/debug: 13
+ win.*: 6
+ default: 12
+
+ The `item_name` parameter is used to generate useful error messages.
+ The `subfield` parameter, if specified, allows access to a second level
+ of the item dictionary: item[field][subfield]. For example, this supports
+
+ mozharness:
+ config:
+ by-test-platform:
+ default: ...
+ """
+ value = item[field]
+ if not isinstance(value, dict):
+ return value
+ if subfield:
+ value = item[field][subfield]
+ if not isinstance(value, dict):
+ return value
+
+ assert len(value) == 1, "Invalid attribute {} in {}".format(field, item_name)
+ keyed_by = value.keys()[0]
+ values = value[keyed_by]
+ if keyed_by.startswith('by-'):
+ keyed_by = keyed_by[3:] # extract just the keyed-by field name
+ if item[keyed_by] in values:
+ return values[item[keyed_by]]
+ for k in values.keys():
+ if re.match(k, item[keyed_by]):
+ return values[k]
+ if 'default' in values:
+ return values['default']
+        raise Exception(
+            "Neither {} {} nor 'default' found while determining item {} in {}".format(
+                keyed_by, item[keyed_by], field, item_name))
+ else:
+ raise Exception(
+ "Invalid attribute {} keyed-by value {} in {}".format(
+ field, keyed_by, item_name))
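+
+# Worked example (hypothetical item, for illustration):
+#
+#   item = {'test-platform': 'win7/opt',
+#           'chunks': {'by-test-platform': {
+#               'macosx-10.11/debug': 13, 'win.*': 6, 'default': 12}}}
+#   get_keyed_by(item, 'chunks', 'my-test')  # -> 6, via the 'win.*' regex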
diff --git a/taskcluster/taskgraph/transforms/build.py b/taskcluster/taskgraph/transforms/build.py
new file mode 100644
index 000000000..3875cbbb1
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/build.py
@@ -0,0 +1,31 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Apply some defaults and minor modifications to the jobs defined in the build
+kind.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def set_defaults(config, jobs):
+ """Set defaults, including those that differ per worker implementation"""
+ for job in jobs:
+ job['treeherder'].setdefault('kind', 'build')
+ job['treeherder'].setdefault('tier', 1)
+ if job['worker']['implementation'] in ('docker-worker', 'docker-engine'):
+ job['worker'].setdefault('docker-image', {'in-tree': 'desktop-build'})
+ job['worker']['chain-of-trust'] = True
+ job.setdefault('extra', {})
+ job['extra'].setdefault('chainOfTrust', {})
+ job['extra']['chainOfTrust'].setdefault('inputs', {})
+ job['extra']['chainOfTrust']['inputs']['docker-image'] = {
+ "task-reference": "<docker-image>"
+ }
+ yield job
diff --git a/taskcluster/taskgraph/transforms/build_attrs.py b/taskcluster/taskgraph/transforms/build_attrs.py
new file mode 100644
index 000000000..56c007614
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/build_attrs.py
@@ -0,0 +1,33 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def set_build_attributes(config, jobs):
+ """
+ Set the build_platform and build_type attributes based on the job name.
+ Although not all jobs using this transform are actual "builds", the try
+ option syntax treats them as such, and this arranges the attributes
+ appropriately for that purpose.
+ """
+ for job in jobs:
+ build_platform, build_type = job['name'].split('/')
+
+ # pgo builds are represented as a different platform, type opt
+ if build_type == 'pgo':
+ build_platform = build_platform + '-pgo'
+ build_type = 'opt'
+
+ attributes = job.setdefault('attributes', {})
+ attributes.update({
+ 'build_platform': build_platform,
+ 'build_type': build_type,
+ })
+
+ yield job
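+
+# e.g. a job named 'linux64/pgo' yields build_platform 'linux64-pgo' with
+# build_type 'opt', while 'win32/debug' yields ('win32', 'debug').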
diff --git a/taskcluster/taskgraph/transforms/gecko_v2_whitelist.py b/taskcluster/taskgraph/transforms/gecko_v2_whitelist.py
new file mode 100644
index 000000000..3817faa50
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/gecko_v2_whitelist.py
@@ -0,0 +1,77 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+This file contains a whitelist of gecko.v2 index route job names. The intent
+of this whitelist is to raise an alarm when new jobs are added. If those jobs
+already run in Buildbot, then it's important that the generated index routes
+match (and that only one of Buildbot and TaskCluster be tier-1 at any time).
+If the jobs are new and never ran in Buildbot, then their job name can be added
+here without any further fuss.
+
+Once all jobs have been ported from Buildbot, this file can be removed.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+# please keep me in lexical order
+JOB_NAME_WHITELIST = set([
+ 'android-api-15-debug',
+ 'android-api-15-gradle-dependencies-opt',
+ 'android-api-15-gradle-opt',
+    'android-api-15-nightly-opt',
+    'android-api-15-opt',
+ 'android-api-15-partner-sample1-opt',
+ 'android-l10n-opt',
+ 'android-x86-opt',
+ 'aries-debug',
+ 'aries-eng-opt',
+ 'browser-haz-debug',
+    'linux-debug',
+    'linux-opt',
+    'linux-pgo',
+    'linux32-l10n-opt',
+ 'linux64-artifact-opt',
+ 'linux64-asan-debug',
+ 'linux64-asan-opt',
+ 'linux64-ccov-opt',
+ 'linux64-debug',
+ 'linux64-jsdcov-opt',
+ 'linux64-l10n-opt',
+ 'linux64-opt',
+ 'linux64-pgo',
+ 'linux64-st-an-opt',
+ 'linux64-valgrind-opt',
+ 'macosx64-debug',
+ 'macosx64-opt',
+ 'macosx64-st-an-opt',
+ 'nexus-5-l-eng-debug',
+ 'nexus-5-l-eng-opt',
+ 'osx-10-10',
+ 'shell-haz-debug',
+    'sm-arm-sim-debug',
+    'sm-arm64-sim-debug',
+ 'sm-asan-opt',
+ 'sm-compacting-debug',
+ 'sm-mozjs-sys-debug',
+ 'sm-msan-opt',
+ 'sm-nonunified-debug',
+ 'sm-package-opt',
+    'sm-plain-opt',
+    'sm-plaindebug-debug',
+ 'sm-rootanalysis-debug',
+ 'sm-tsan-opt',
+ 'win32-debug',
+ 'win32-opt',
+ 'win32-pgo',
+ 'win64-debug',
+ 'win64-opt',
+ 'win64-pgo',
+])
+
+JOB_NAME_WHITELIST_ERROR = """\
+The gecko-v2 job name {} is not in the whitelist in
+taskcluster/taskgraph/transforms/gecko_v2_whitelist.py.
+If this job runs on Buildbot, please ensure that the job names match between
+Buildbot and TaskCluster, then add the job name to the whitelist. If this is a
+new job, there is nothing to check -- just add the job to the whitelist.
+"""
diff --git a/taskcluster/taskgraph/transforms/job/__init__.py b/taskcluster/taskgraph/transforms/job/__init__.py
new file mode 100644
index 000000000..a0860c032
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/__init__.py
@@ -0,0 +1,164 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Convert a job description into a task description.
+
+Job descriptions are similar to task descriptions, but they specify how to run
+the job at a higher level, using a "run" field that can be interpreted by
+run-using handlers in `taskcluster/taskgraph/transforms/job`.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import copy
+import logging
+import os
+
+from taskgraph.transforms.base import validate_schema, TransformSequence
+from taskgraph.transforms.task import task_description_schema
+from voluptuous import (
+ Optional,
+ Required,
+ Schema,
+ Extra,
+)
+
+logger = logging.getLogger(__name__)
+
+# Voluptuous uses marker objects as dictionary *keys*, but they are not
+# comparable, so we cast all of the keys back to regular strings
+task_description_schema = {str(k): v for k, v in task_description_schema.schema.iteritems()}
+
+# Schema for a build description
+job_description_schema = Schema({
+ # The name of the job and the job's label. At least one must be specified,
+ # and the label will be generated from the name if necessary, by prepending
+ # the kind.
+ Optional('name'): basestring,
+ Optional('label'): basestring,
+
+ # the following fields are passed directly through to the task description,
+ # possibly modified by the run implementation. See
+ # taskcluster/taskgraph/transforms/task.py for the schema details.
+ Required('description'): task_description_schema['description'],
+ Optional('attributes'): task_description_schema['attributes'],
+ Optional('dependencies'): task_description_schema['dependencies'],
+ Optional('expires-after'): task_description_schema['expires-after'],
+ Optional('routes'): task_description_schema['routes'],
+ Optional('scopes'): task_description_schema['scopes'],
+ Optional('extra'): task_description_schema['extra'],
+ Optional('treeherder'): task_description_schema['treeherder'],
+ Optional('index'): task_description_schema['index'],
+ Optional('run-on-projects'): task_description_schema['run-on-projects'],
+ Optional('coalesce-name'): task_description_schema['coalesce-name'],
+ Optional('worker-type'): task_description_schema['worker-type'],
+ Required('worker'): task_description_schema['worker'],
+ Optional('when'): task_description_schema['when'],
+
+ # A description of how to run this job.
+ 'run': {
+ # The key to a job implementation in a peer module to this one
+ 'using': basestring,
+
+ # Any remaining content is verified against that job implementation's
+ # own schema.
+ Extra: object,
+ },
+})
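+
+# A job of roughly this shape would satisfy the schema (all values are
+# illustrative, not prescriptive):
+#
+#   name: linux64/opt
+#   description: an example build job
+#   worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+#   worker:
+#       implementation: docker-worker
+#       docker-image: {in-tree: desktop-build}
+#   run:
+#       using: run-task
+#       command: ./mach build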
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def validate(config, jobs):
+ for job in jobs:
+        yield validate_schema(job_description_schema, job,
+                              "In job {!r}:".format(job.get('name', job.get('label'))))
+
+
+@transforms.add
+def make_task_description(config, jobs):
+ """Given a build description, create a task description"""
+ # import plugin modules first, before iterating over jobs
+ import_all()
+ for job in jobs:
+ if 'label' not in job:
+ if 'name' not in job:
+ raise Exception("job has neither a name nor a label")
+ job['label'] = '{}-{}'.format(config.kind, job['name'])
+        if job.get('name'):
+ del job['name']
+
+ taskdesc = copy.deepcopy(job)
+
+ # fill in some empty defaults to make run implementations easier
+ taskdesc.setdefault('attributes', {})
+ taskdesc.setdefault('dependencies', {})
+ taskdesc.setdefault('routes', [])
+ taskdesc.setdefault('scopes', [])
+ taskdesc.setdefault('extra', {})
+
+ # give the function for job.run.using on this worker implementation a
+ # chance to set up the task description.
+ configure_taskdesc_for_run(config, job, taskdesc)
+ del taskdesc['run']
+
+ # yield only the task description, discarding the job description
+ yield taskdesc
+
+# A registry of all functions decorated with run_job_using
+registry = {}
+
+
+def run_job_using(worker_implementation, run_using, schema=None):
+ """Register the decorated function as able to set up a task description for
+ jobs with the given worker implementation and `run.using` property. If
+ `schema` is given, the job's run field will be verified to match it.
+
+    The decorated function should have the signature `using_foo(config, job,
+    taskdesc)` and should modify the task description in-place. The skeleton of
+ the task description is already set up, but without a payload."""
+ def wrap(func):
+ for_run_using = registry.setdefault(run_using, {})
+ if worker_implementation in for_run_using:
+            raise Exception("run_job_using({!r}, {!r}) already exists: {!r}".format(
+                run_using, worker_implementation, for_run_using[worker_implementation]))
+ for_run_using[worker_implementation] = (func, schema)
+ return func
+ return wrap
+
+
+def configure_taskdesc_for_run(config, job, taskdesc):
+ """
+ Run the appropriate function for this job against the given task
+ description.
+
+ This will raise an appropriate error if no function exists, or if the job's
+ run is not valid according to the schema.
+ """
+ run_using = job['run']['using']
+ if run_using not in registry:
+ raise Exception("no functions for run.using {!r}".format(run_using))
+
+ worker_implementation = job['worker']['implementation']
+ if worker_implementation not in registry[run_using]:
+ raise Exception("no functions for run.using {!r} on {!r}".format(
+ run_using, worker_implementation))
+
+ func, schema = registry[run_using][worker_implementation]
+ if schema:
+ job['run'] = validate_schema(
+ schema, job['run'],
+ "In job.run using {!r} for job {!r}:".format(
+ job['run']['using'], job['label']))
+
+ func(config, job, taskdesc)
+
+
+def import_all():
+ """Import all modules that are siblings of this one, triggering the decorator
+ above in the process."""
+ for f in os.listdir(os.path.dirname(__file__)):
+        if f.endswith('.py') and f not in ('common.py', '__init__.py'):
+ __import__('taskgraph.transforms.job.' + f[:-3])
diff --git a/taskcluster/taskgraph/transforms/job/common.py b/taskcluster/taskgraph/transforms/job/common.py
new file mode 100644
index 000000000..59a51d75a
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/common.py
@@ -0,0 +1,108 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Common support for various job types. These functions are all named after the
+worker implementation they operate on, and take the same three parameters, for
+consistency.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+SECRET_SCOPE = 'secrets:get:project/releng/gecko/{}/level-{}/{}'
+
+
+def docker_worker_add_workspace_cache(config, job, taskdesc):
+ """Add the workspace cache based on the build platform/type and level,
+ except on try where workspace caches are not used."""
+ if config.params['project'] == 'try':
+ return
+
+ taskdesc['worker'].setdefault('caches', []).append({
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-build-{}-{}-workspace'.format(
+ config.params['level'], config.params['project'],
+ taskdesc['attributes']['build_platform'],
+ taskdesc['attributes']['build_type'],
+ ),
+ 'mount-point': "/home/worker/workspace",
+ })
+
+
+def docker_worker_add_tc_vcs_cache(config, job, taskdesc):
+ taskdesc['worker'].setdefault('caches', []).append({
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-tc-vcs'.format(
+ config.params['level'], config.params['project']),
+ 'mount-point': "/home/worker/.tc-vcs",
+ })
+
+
+def docker_worker_add_public_artifacts(config, job, taskdesc):
+ taskdesc['worker'].setdefault('artifacts', []).append({
+ 'name': 'public/build',
+ 'path': '/home/worker/artifacts/',
+ 'type': 'directory',
+ })
+
+
+def docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc):
+ """Add the GECKO_BASE_* and GECKO_HEAD_* env vars to the worker."""
+ env = taskdesc['worker'].setdefault('env', {})
+ env.update({
+ 'GECKO_BASE_REPOSITORY': config.params['base_repository'],
+ 'GECKO_HEAD_REF': config.params['head_rev'],
+ 'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
+ 'GECKO_HEAD_REV': config.params['head_rev'],
+ })
+
+
+def docker_worker_support_vcs_checkout(config, job, taskdesc):
+ """Update a job/task with parameters to enable a VCS checkout.
+
+ The configuration is intended for tasks using "run-task" and its
+ VCS checkout behavior.
+ """
+ level = config.params['level']
+
+ taskdesc['worker'].setdefault('caches', []).append({
+ 'type': 'persistent',
+ # History of versions:
+ #
+ # ``level-%s-checkouts`` was initially used and contained a number
+ # of backwards incompatible changes, such as moving HG_STORE_PATH
+ # from a separate cache to this cache.
+ #
+ # ``v1`` was introduced to provide a clean break from the unversioned
+ # cache.
+ 'name': 'level-%s-checkouts-v1' % level,
+ 'mount-point': '/home/worker/checkouts',
+ })
+
+ taskdesc['worker'].setdefault('env', {}).update({
+ 'GECKO_BASE_REPOSITORY': config.params['base_repository'],
+ 'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
+ 'GECKO_HEAD_REV': config.params['head_rev'],
+ 'HG_STORE_PATH': '/home/worker/checkouts/hg-store',
+ })
+
+ # Give task access to hgfingerprint secret so it can pin the certificate
+ # for hg.mozilla.org.
+ taskdesc['scopes'].append('secrets:get:project/taskcluster/gecko/hgfingerprint')
+ taskdesc['worker']['taskcluster-proxy'] = True
+
+
+def docker_worker_setup_secrets(config, job, taskdesc):
+ """Set up access to secrets via taskcluster-proxy. The value of
+ run['secrets'] should be a boolean or a list of secret names that
+ can be accessed."""
+ if not job['run'].get('secrets'):
+ return
+
+ taskdesc['worker']['taskcluster-proxy'] = True
+ secrets = job['run']['secrets']
+ if secrets is True:
+ secrets = ['*']
+ for sec in secrets:
+ taskdesc['scopes'].append(SECRET_SCOPE.format(
+ job['treeherder']['kind'], config.params['level'], sec))
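+
+# e.g. SECRET_SCOPE.format('build', 3, 'gapi') yields the scope
+# 'secrets:get:project/releng/gecko/build/level-3/gapi' (illustrative values).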
diff --git a/taskcluster/taskgraph/transforms/job/hazard.py b/taskcluster/taskgraph/transforms/job/hazard.py
new file mode 100644
index 000000000..c5b500843
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/hazard.py
@@ -0,0 +1,91 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running hazard jobs via dedicated scripts
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from voluptuous import Schema, Required, Optional, Any
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.common import (
+ docker_worker_add_workspace_cache,
+ docker_worker_setup_secrets,
+ docker_worker_add_public_artifacts,
+ docker_worker_support_vcs_checkout,
+)
+
+haz_run_schema = Schema({
+ Required('using'): 'hazard',
+
+ # The command to run within the task image (passed through to the worker)
+ Required('command'): basestring,
+
+ # The tooltool manifest to use; default in the script is used if omitted
+ Optional('tooltool-manifest'): basestring,
+
+ # The mozconfig to use; default in the script is used if omitted
+ Optional('mozconfig'): basestring,
+
+ # The set of secret names to which the task has access; these are prefixed
+ # with `project/releng/gecko/{treeherder.kind}/level-{level}/`. Setting
+ # this will enable any worker features required and set the task's scopes
+ # appropriately. `true` here means ['*'], all secrets. Not supported on
+ # Windows
+ Required('secrets', default=False): Any(bool, [basestring]),
+})
+
+
+@run_job_using("docker-worker", "hazard", schema=haz_run_schema)
+def docker_worker_hazard(config, job, taskdesc):
+ run = job['run']
+
+ worker = taskdesc['worker']
+ worker['artifacts'] = []
+ worker['caches'] = []
+
+ docker_worker_add_public_artifacts(config, job, taskdesc)
+ docker_worker_add_workspace_cache(config, job, taskdesc)
+ docker_worker_setup_secrets(config, job, taskdesc)
+ docker_worker_support_vcs_checkout(config, job, taskdesc)
+
+ env = worker['env']
+ env.update({
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ })
+
+ # script parameters
+ if run.get('tooltool-manifest'):
+ env['TOOLTOOL_MANIFEST'] = run['tooltool-manifest']
+ if run.get('mozconfig'):
+ env['MOZCONFIG'] = run['mozconfig']
+
+ # tooltool downloads
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'tooltool-cache',
+ 'mount-point': '/home/worker/tooltool-cache',
+ })
+ worker['relengapi-proxy'] = True
+ taskdesc['scopes'].extend([
+ 'docker-worker:relengapi-proxy:tooltool.download.public',
+ ])
+ env['TOOLTOOL_CACHE'] = '/home/worker/tooltool-cache'
+ env['TOOLTOOL_REPO'] = 'https://github.com/mozilla/build-tooltool'
+ env['TOOLTOOL_REV'] = 'master'
+
+ # build-haz-linux.sh needs this otherwise it assumes the checkout is in
+ # the workspace.
+ env['GECKO_DIR'] = '/home/worker/checkouts/gecko'
+
+ worker['command'] = [
+ '/home/worker/bin/run-task',
+ '--chown-recursive', '/home/worker/tooltool-cache',
+ '--chown-recursive', '/home/worker/workspace',
+ '--vcs-checkout', '/home/worker/checkouts/gecko',
+ '--',
+ '/bin/bash', '-c', run['command']
+ ]
diff --git a/taskcluster/taskgraph/transforms/job/mach.py b/taskcluster/taskgraph/transforms/job/mach.py
new file mode 100644
index 000000000..8df202dbc
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/mach.py
@@ -0,0 +1,30 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running mach tasks (via run-task)
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.run_task import docker_worker_run_task
+from voluptuous import Schema, Required
+
+mach_schema = Schema({
+ Required('using'): 'mach',
+
+ # The mach command (omitting `./mach`) to run
+ Required('mach'): basestring,
+})
+
+
+@run_job_using("docker-worker", "mach", schema=mach_schema)
+def docker_worker_mach(config, job, taskdesc):
+ run = job['run']
+
+ # defer to the run_task implementation
+ run['command'] = 'cd /home/worker/checkouts/gecko && ./mach ' + run['mach']
+ run['checkout'] = True
+ del run['mach']
+ docker_worker_run_task(config, job, taskdesc)
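+
+# e.g. a job with run: {using: mach, mach: 'marionette-harness-test'} is
+# rewritten to a run-task job whose command is
+# 'cd /home/worker/checkouts/gecko && ./mach marionette-harness-test'
+# (illustrative mach subcommand).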
diff --git a/taskcluster/taskgraph/transforms/job/mozharness.py b/taskcluster/taskgraph/transforms/job/mozharness.py
new file mode 100644
index 000000000..fb3cd00dd
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/mozharness.py
@@ -0,0 +1,226 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+
+Support for running jobs via mozharness. Ideally, most stuff gets run this
+way, and certainly anything using mozharness should use this approach.
+
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from voluptuous import Schema, Required, Optional, Any
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.common import (
+ docker_worker_add_workspace_cache,
+ docker_worker_add_gecko_vcs_env_vars,
+ docker_worker_setup_secrets,
+ docker_worker_add_public_artifacts,
+ docker_worker_support_vcs_checkout,
+)
+
+COALESCE_KEY = 'builds.{project}.{name}'
+
+mozharness_run_schema = Schema({
+ Required('using'): 'mozharness',
+
+ # the mozharness script used to run this task, relative to the testing/
+ # directory and using forward slashes even on Windows
+ Required('script'): basestring,
+
+ # the config files required for the task, relative to
+ # testing/mozharness/configs and using forward slashes even on Windows
+ Required('config'): [basestring],
+
+ # any additional actions to pass to the mozharness command; not supported
+ # on Windows
+ Optional('actions'): [basestring],
+
+ # any additional options (without leading --) to be passed to mozharness;
+ # not supported on Windows
+ Optional('options'): [basestring],
+
+ # --custom-build-variant-cfg value (not supported on Windows)
+ Optional('custom-build-variant-cfg'): basestring,
+
+ # If not false, tooltool downloads will be enabled via relengAPIProxy
+ # for either just public files, or all files. Not supported on Windows
+ Required('tooltool-downloads', default=False): Any(
+ False,
+ 'public',
+ 'internal',
+ ),
+
+ # The set of secret names to which the task has access; these are prefixed
+ # with `project/releng/gecko/{treeherder.kind}/level-{level}/`. Setting
+ # this will enable any worker features required and set the task's scopes
+ # appropriately. `true` here means ['*'], all secrets. Not supported on
+ # Windows
+ Required('secrets', default=False): Any(bool, [basestring]),
+
+ # If true, taskcluster proxy will be enabled; note that it may also be enabled
+ # automatically e.g., for secrets support. Not supported on Windows.
+ Required('taskcluster-proxy', default=False): bool,
+
+ # If true, the build scripts will start Xvfb. Not supported on Windows.
+ Required('need-xvfb', default=False): bool,
+
+ # If false, indicate that builds should skip producing artifacts. Not
+ # supported on Windows.
+ Required('keep-artifacts', default=True): bool,
+
+ # If specified, use the in-tree job script specified.
+ Optional('job-script'): basestring,
+})
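+
+# A typical run section under this schema (values are illustrative only):
+#
+#   run:
+#       using: mozharness
+#       script: mozharness/scripts/fx_desktop_build.py
+#       config:
+#           - builds/releng_base_linux_64_builds.py
+#       secrets: true
+#       tooltool-downloads: public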
+
+
+@run_job_using("docker-worker", "mozharness", schema=mozharness_run_schema)
+def mozharness_on_docker_worker_setup(config, job, taskdesc):
+ run = job['run']
+
+ worker = taskdesc['worker']
+ worker['implementation'] = job['worker']['implementation']
+
+ # running via mozharness assumes desktop-build (which contains build.sh)
+ taskdesc['worker']['docker-image'] = {"in-tree": "desktop-build"}
+
+ worker['relengapi-proxy'] = False # but maybe enabled for tooltool below
+ worker['taskcluster-proxy'] = run.get('taskcluster-proxy')
+
+ docker_worker_add_public_artifacts(config, job, taskdesc)
+ docker_worker_add_workspace_cache(config, job, taskdesc)
+ docker_worker_support_vcs_checkout(config, job, taskdesc)
+
+ env = worker.setdefault('env', {})
+ env.update({
+ 'MOZHARNESS_CONFIG': ' '.join(run['config']),
+ 'MOZHARNESS_SCRIPT': run['script'],
+ 'MH_BRANCH': config.params['project'],
+ 'MH_BUILD_POOL': 'taskcluster',
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ })
+
+ if 'actions' in run:
+ env['MOZHARNESS_ACTIONS'] = ' '.join(run['actions'])
+
+ if 'options' in run:
+ env['MOZHARNESS_OPTIONS'] = ' '.join(run['options'])
+
+ if 'custom-build-variant-cfg' in run:
+ env['MH_CUSTOM_BUILD_VARIANT_CFG'] = run['custom-build-variant-cfg']
+
+ if 'job-script' in run:
+ env['JOB_SCRIPT'] = run['job-script']
+
+ # if we're not keeping artifacts, set some env variables to empty values
+ # that will cause the build process to skip copying the results to the
+ # artifacts directory. This will have no effect for operations that are
+ # not builds.
+ if not run['keep-artifacts']:
+ env['DIST_TARGET_UPLOADS'] = ''
+ env['DIST_UPLOADS'] = ''
+
+ # Xvfb
+ if run['need-xvfb']:
+ env['NEED_XVFB'] = 'true'
+
+ # tooltool downloads
+ if run['tooltool-downloads']:
+ worker['relengapi-proxy'] = True
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'tooltool-cache',
+ 'mount-point': '/home/worker/tooltool-cache',
+ })
+ taskdesc['scopes'].extend([
+ 'docker-worker:relengapi-proxy:tooltool.download.public',
+ ])
+ if run['tooltool-downloads'] == 'internal':
+ taskdesc['scopes'].append(
+ 'docker-worker:relengapi-proxy:tooltool.download.internal')
+ env['TOOLTOOL_CACHE'] = '/home/worker/tooltool-cache'
+ env['TOOLTOOL_REPO'] = 'https://github.com/mozilla/build-tooltool'
+ env['TOOLTOOL_REV'] = 'master'
+
+ # Retry if mozharness returns TBPL_RETRY
+ worker['retry-exit-status'] = 4
+
+ docker_worker_setup_secrets(config, job, taskdesc)
+
+ command = [
+ '/home/worker/bin/run-task',
+ # Various caches/volumes are default owned by root:root.
+ '--chown-recursive', '/home/worker/workspace',
+ '--chown-recursive', '/home/worker/tooltool-cache',
+ '--vcs-checkout', '/home/worker/workspace/build/src',
+ '--tools-checkout', '/home/worker/workspace/build/tools',
+ '--',
+ ]
+ command.append("/home/worker/workspace/build/src/{}".format(
+ run.get('job-script',
+ "taskcluster/scripts/builder/build-linux.sh"
+ )))
+
+ worker['command'] = command
+
+
+# We use the generic worker to run tasks on Windows
+@run_job_using("generic-worker", "mozharness", schema=mozharness_run_schema)
+def mozharness_on_windows(config, job, taskdesc):
+ run = job['run']
+
+ # fail if invalid run options are included
+ invalid = []
+ for prop in ['actions', 'custom-build-variant-cfg',
+ 'tooltool-downloads', 'secrets', 'taskcluster-proxy',
+ 'need-xvfb']:
+ if prop in run and run[prop]:
+ invalid.append(prop)
+ if not run.get('keep-artifacts', True):
+ invalid.append('keep-artifacts')
+ if invalid:
+ raise Exception("Jobs run using mozharness on Windows do not support properties " +
+ ', '.join(invalid))
+
+ worker = taskdesc['worker']
+
+ worker['artifacts'] = [{
+ 'path': r'public\build',
+ 'type': 'directory',
+ }]
+
+ docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)
+
+ env = worker['env']
+ env.update({
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ 'TOOLTOOL_REPO': 'https://github.com/mozilla/build-tooltool',
+ 'TOOLTOOL_REV': 'master',
+ })
+
+ mh_command = [r'c:\mozilla-build\python\python.exe']
+ mh_command.append('\\'.join([r'.\build\src\testing', run['script'].replace('/', '\\')]))
+ for cfg in run['config']:
+ mh_command.append('--config ' + cfg.replace('/', '\\'))
+ mh_command.append('--branch ' + config.params['project'])
+ mh_command.append(r'--skip-buildbot-actions --work-dir %cd:Z:=z:%\build')
+ for option in run.get('options', []):
+ mh_command.append('--' + option)
+
+ hg_command = ['"c:\\Program Files\\Mercurial\\hg.exe"']
+ hg_command.append('robustcheckout')
+ hg_command.extend(['--sharebase', 'y:\\hg-shared'])
+ hg_command.append('--purge')
+ hg_command.extend(['--upstream', 'https://hg.mozilla.org/mozilla-unified'])
+ hg_command.extend(['--revision', env['GECKO_HEAD_REV']])
+ hg_command.append(env['GECKO_HEAD_REPOSITORY'])
+ hg_command.append('.\\build\\src')
+
+ worker['command'] = [
+ ' '.join(hg_command),
+ ' '.join(mh_command)
+ ]
diff --git a/taskcluster/taskgraph/transforms/job/run_task.py b/taskcluster/taskgraph/transforms/job/run_task.py
new file mode 100644
index 000000000..296fe43ee
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/run_task.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running jobs that are invoked via the `run-task` script.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import copy
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.common import (
+ docker_worker_support_vcs_checkout,
+)
+from voluptuous import Schema, Required, Any
+
+run_task_schema = Schema({
+ Required('using'): 'run-task',
+
+ # if true, add a cache at ~worker/.cache, which is where things like pip
+ # tend to hide their caches. This cache is never added for level-1 jobs.
+ Required('cache-dotcache', default=False): bool,
+
+ # if true (the default), perform a checkout in /home/worker/checkouts/gecko
+ Required('checkout', default=True): bool,
+
+ # The command arguments to pass to the `run-task` script, after the
+ # checkout arguments. If a list, it will be passed directly; otherwise
+ # it will be included in a single argument to `bash -cx`.
+ Required('command'): Any([basestring], basestring),
+})
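+
+# e.g. (illustrative):
+#
+#   run:
+#       using: run-task
+#       cache-dotcache: true
+#       command: cd /home/worker/checkouts/gecko && ./mach lint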
+
+
+@run_job_using("docker-worker", "run-task", schema=run_task_schema)
+def docker_worker_run_task(config, job, taskdesc):
+ run = job['run']
+
+ worker = taskdesc['worker'] = copy.deepcopy(job['worker'])
+
+ if run['checkout']:
+ docker_worker_support_vcs_checkout(config, job, taskdesc)
+
+ if run.get('cache-dotcache') and int(config.params['level']) > 1:
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'level-{level}-{project}-dotcache'.format(**config.params),
+ 'mount-point': '/home/worker/.cache',
+ })
+
+ run_command = run['command']
+ if isinstance(run_command, basestring):
+ run_command = ['bash', '-cx', run_command]
+ command = ['/home/worker/bin/run-task']
+ if run['checkout']:
+ command.append('--vcs-checkout=/home/worker/checkouts/gecko')
+ command.append('--')
+ command.extend(run_command)
+ worker['command'] = command
diff --git a/taskcluster/taskgraph/transforms/job/spidermonkey.py b/taskcluster/taskgraph/transforms/job/spidermonkey.py
new file mode 100644
index 000000000..d78b78504
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/spidermonkey.py
@@ -0,0 +1,86 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running spidermonkey jobs via dedicated scripts
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from voluptuous import Schema, Required, Optional, Any
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.common import (
+ docker_worker_add_public_artifacts,
+ docker_worker_support_vcs_checkout,
+)
+
+sm_run_schema = Schema({
+ Required('using'): Any('spidermonkey', 'spidermonkey-package', 'spidermonkey-mozjs-crate'),
+
+ # The SPIDERMONKEY_VARIANT
+ Required('spidermonkey-variant'): basestring,
+
+ # The tooltool manifest to use; default from sm-tooltool-config.sh is used
+ # if omitted
+ Optional('tooltool-manifest'): basestring,
+})
+
+
+@run_job_using("docker-worker", "spidermonkey", schema=sm_run_schema)
+@run_job_using("docker-worker", "spidermonkey-package", schema=sm_run_schema)
+@run_job_using("docker-worker", "spidermonkey-mozjs-crate", schema=sm_run_schema)
+def docker_worker_spidermonkey(config, job, taskdesc):
+ run = job['run']
+
+ worker = taskdesc['worker']
+ worker['artifacts'] = []
+ worker['caches'] = []
+
+ if int(config.params['level']) > 1:
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-build-spidermonkey-workspace'.format(
+ config.params['level'], config.params['project']),
+ 'mount-point': "/home/worker/workspace",
+ })
+
+ docker_worker_add_public_artifacts(config, job, taskdesc)
+
+ env = worker['env']
+ env.update({
+ 'MOZHARNESS_DISABLE': 'true',
+ 'SPIDERMONKEY_VARIANT': run['spidermonkey-variant'],
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ })
+
+    # tooltool downloads; note that this script downloads using the API
+    # endpoint directly, rather than via relengapi-proxy
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'tooltool-cache',
+ 'mount-point': '/home/worker/tooltool-cache',
+ })
+ env['TOOLTOOL_CACHE'] = '/home/worker/tooltool-cache'
+ if run.get('tooltool-manifest'):
+ env['TOOLTOOL_MANIFEST'] = run['tooltool-manifest']
+
+ docker_worker_support_vcs_checkout(config, job, taskdesc)
+
+ script = "build-sm.sh"
+ if run['using'] == 'spidermonkey-package':
+ script = "build-sm-package.sh"
+ elif run['using'] == 'spidermonkey-mozjs-crate':
+ script = "build-sm-mozjs-crate.sh"
+
+ worker['command'] = [
+ '/home/worker/bin/run-task',
+ '--chown-recursive', '/home/worker/workspace',
+ '--chown-recursive', '/home/worker/tooltool-cache',
+ '--vcs-checkout', '/home/worker/workspace/build/src',
+ '--',
+ '/bin/bash',
+ '-c',
+ 'cd /home/worker && workspace/build/src/taskcluster/scripts/builder/%s' % script
+ ]
diff --git a/taskcluster/taskgraph/transforms/job/toolchain.py b/taskcluster/taskgraph/transforms/job/toolchain.py
new file mode 100644
index 000000000..d814f7824
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/job/toolchain.py
@@ -0,0 +1,115 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running toolchain-building jobs via dedicated scripts
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from voluptuous import Schema, Required
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.common import (
+ docker_worker_add_tc_vcs_cache,
+ docker_worker_add_gecko_vcs_env_vars
+)
+
+toolchain_run_schema = Schema({
+ Required('using'): 'toolchain-script',
+
+ # the script (in taskcluster/scripts/misc) to run
+ Required('script'): basestring,
+})
+
+
+@run_job_using("docker-worker", "toolchain-script", schema=toolchain_run_schema)
+def docker_worker_toolchain(config, job, taskdesc):
+ run = job['run']
+
+ worker = taskdesc['worker']
+ worker['artifacts'] = []
+ worker['caches'] = []
+
+ worker['artifacts'].append({
+ 'name': 'public',
+ 'path': '/home/worker/workspace/artifacts/',
+ 'type': 'directory',
+ })
+
+ docker_worker_add_tc_vcs_cache(config, job, taskdesc)
+ docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)
+
+ env = worker['env']
+ env.update({
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ 'TOOLS_DISABLE': 'true',
+ })
+
+ # tooltool downloads; note that this downloads using the API endpoint directly,
+ # rather than via relengapi-proxy
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'tooltool-cache',
+ 'mount-point': '/home/worker/tooltool-cache',
+ })
+ env['TOOLTOOL_CACHE'] = '/home/worker/tooltool-cache'
+ env['TOOLTOOL_REPO'] = 'https://github.com/mozilla/build-tooltool'
+ env['TOOLTOOL_REV'] = 'master'
+
+ command = ' && '.join([
+ "cd /home/worker/",
+ "./bin/checkout-sources.sh",
+ "./workspace/build/src/taskcluster/scripts/misc/" + run['script'],
+ ])
+ worker['command'] = ["/bin/bash", "-c", command]
+
+
+@run_job_using("generic-worker", "toolchain-script", schema=toolchain_run_schema)
+def windows_toolchain(config, job, taskdesc):
+ run = job['run']
+
+ worker = taskdesc['worker']
+
+ worker['artifacts'] = [{
+ 'path': r'public\build',
+ 'type': 'directory',
+ }]
+
+ docker_worker_add_gecko_vcs_env_vars(config, job, taskdesc)
+
+ # We fetch LLVM SVN into this.
+ svn_cache = 'level-{}-toolchain-clang-cl-build-svn'.format(config.params['level'])
+ worker['mounts'] = [{
+ 'cache-name': svn_cache,
+ 'path': r'llvm-sources',
+ }]
+ taskdesc['scopes'].extend([
+ 'generic-worker:cache:' + svn_cache,
+ ])
+
+ env = worker['env']
+ env.update({
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ 'TOOLTOOL_REPO': 'https://github.com/mozilla/build-tooltool',
+ 'TOOLTOOL_REV': 'master',
+ })
+
+ hg = r'c:\Program Files\Mercurial\hg.exe'
+ hg_command = ['"{}"'.format(hg)]
+ hg_command.append('robustcheckout')
+ hg_command.extend(['--sharebase', 'y:\\hg-shared'])
+ hg_command.append('--purge')
+ hg_command.extend(['--upstream', 'https://hg.mozilla.org/mozilla-unified'])
+ hg_command.extend(['--revision', '%GECKO_HEAD_REV%'])
+ hg_command.append('%GECKO_HEAD_REPOSITORY%')
+ hg_command.append('.\\build\\src')
+
+ bash = r'c:\mozilla-build\msys\bin\bash'
+ worker['command'] = [
+ ' '.join(hg_command),
+        # run the in-tree script with the msys bash shipped in mozilla-build
+ r'{} -c ./build/src/taskcluster/scripts/misc/{}'.format(bash, run['script'])
+ ]
diff --git a/taskcluster/taskgraph/transforms/l10n.py b/taskcluster/taskgraph/transforms/l10n.py
new file mode 100644
index 000000000..42137b558
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/l10n.py
@@ -0,0 +1,44 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Do transforms specific to l10n kind
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def mh_config_replace_project(config, jobs):
+ """ Replaces {project} in mh config entries with the current project """
+ # XXXCallek This is a bad pattern but exists to satisfy ease-of-porting for buildbot
+ for job in jobs:
+        if job['run'].get('using') != 'mozharness':
+ # Nothing to do, not mozharness
+ yield job
+ continue
+ job['run']['config'] = map(
+ lambda x: x.format(project=config.params['project']),
+ job['run']['config']
+ )
+ yield job
+
+
+@transforms.add
+def mh_options_replace_project(config, jobs):
+ """ Replaces {project} in mh option entries with the current project """
+ # XXXCallek This is a bad pattern but exists to satisfy ease-of-porting for buildbot
+ for job in jobs:
+        if job['run'].get('using') != 'mozharness':
+ # Nothing to do, not mozharness
+ yield job
+ continue
+ job['run']['options'] = map(
+ lambda x: x.format(project=config.params['project']),
+ job['run']['options']
+ )
+ yield job
diff --git a/taskcluster/taskgraph/transforms/marionette_harness.py b/taskcluster/taskgraph/transforms/marionette_harness.py
new file mode 100644
index 000000000..a24db470c
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/marionette_harness.py
@@ -0,0 +1,37 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Set dynamic task description properties of the marionette-harness task.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def setup_task(config, tasks):
+ for task in tasks:
+ del task['name']
+ task['label'] = 'marionette-harness'
+ env = task['worker'].setdefault('env', {})
+ env.update({
+ 'GECKO_BASE_REPOSITORY': config.params['base_repository'],
+ 'GECKO_HEAD_REF': config.params['head_rev'],
+ 'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
+ 'GECKO_HEAD_REV': config.params['head_rev'],
+ 'MOZ_BUILD_DATE': config.params['moz_build_date'],
+ 'MOZ_SCM_LEVEL': config.params['level'],
+ })
+
+ task['worker']['caches'] = [{
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-tc-vcs'.format(
+ config.params['level'], config.params['project']),
+ 'mount-point': "/home/worker/.tc-vcs",
+ }]
+
+ yield task
diff --git a/taskcluster/taskgraph/transforms/task.py b/taskcluster/taskgraph/transforms/task.py
new file mode 100644
index 000000000..6e371e4ba
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/task.py
@@ -0,0 +1,648 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+These transformations take a task description and turn it into a TaskCluster
+task definition (along with attributes, label, etc.). The input to these
+transformations is generic to any kind of task, but abstracts away some of the
+complexities of worker implementations, scopes, and treeherder annotations.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import json
+import time
+
+from taskgraph.util.treeherder import split_symbol
+from taskgraph.transforms.base import (
+ validate_schema,
+ TransformSequence
+)
+from voluptuous import Schema, Any, Required, Optional, Extra
+
+from .gecko_v2_whitelist import JOB_NAME_WHITELIST, JOB_NAME_WHITELIST_ERROR
+
+# shortcut for a string where task references are allowed
+taskref_or_string = Any(
+ basestring,
+ {Required('task-reference'): basestring})
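+# e.g. both 'echo hello' and {'task-reference': '<build>'} are accepted here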
+
+# A task description is a general description of a TaskCluster task
+task_description_schema = Schema({
+ # the label for this task
+ Required('label'): basestring,
+
+ # description of the task (for metadata)
+ Required('description'): basestring,
+
+ # attributes for this task
+ Optional('attributes'): {basestring: object},
+
+ # dependencies of this task, keyed by name; these are passed through
+ # verbatim and subject to the interpretation of the Task's get_dependencies
+ # method.
+ Optional('dependencies'): {basestring: object},
+
+ # expiration and deadline times, relative to task creation, with units
+ # (e.g., "14 days"). Defaults are set based on the project.
+ Optional('expires-after'): basestring,
+ Optional('deadline-after'): basestring,
+
+ # custom routes for this task; the default treeherder routes will be added
+ # automatically
+ Optional('routes'): [basestring],
+
+ # custom scopes for this task; any scopes required for the worker will be
+ # added automatically
+ Optional('scopes'): [basestring],
+
+ # custom "task.extra" content
+ Optional('extra'): {basestring: object},
+
+ # treeherder-related information; see
+ # https://schemas.taskcluster.net/taskcluster-treeherder/v1/task-treeherder-config.json
+ # If not specified, no treeherder extra information or routes will be
+ # added to the task
+ Optional('treeherder'): {
+ # either a bare symbol, or "grp(sym)".
+ 'symbol': basestring,
+
+ # the job kind
+ 'kind': Any('build', 'test', 'other'),
+
+ # tier for this task
+ 'tier': int,
+
+ # task platform, in the form platform/collection, used to set
+ # treeherder.machine.platform and treeherder.collection or
+ # treeherder.labels
+ 'platform': basestring,
+
+ # treeherder environments (defaults to both staging and production)
+ Required('environments', default=['production', 'staging']): ['production', 'staging'],
+ },
+
+ # information for indexing this build so its artifacts can be discovered;
+ # if omitted, the build will not be indexed.
+ Optional('index'): {
+ # the name of the product this build produces
+ 'product': Any('firefox', 'mobile'),
+
+ # the names to use for this job in the TaskCluster index
+ 'job-name': Any(
+ # Assuming the job is named "normally", this is the v2 job name,
+ # and the v1 and buildbot routes will be determined appropriately.
+ basestring,
+
+ # otherwise, give separate names for each of the legacy index
+ # routes; if a name is omitted, no corresponding route will be
+ # created.
+ {
+ # the name as it appears in buildbot routes
+ Optional('buildbot'): basestring,
+ Required('gecko-v2'): basestring,
+ }
+ ),
+
+ # The rank that the task will receive in the TaskCluster
+        # index. A newly completed task supersedes the currently
+ # indexed task iff it has a higher rank. If unspecified,
+ # 'by-tier' behavior will be used.
+ 'rank': Any(
+            # Rank is equal to the timestamp of the build_date for tier-1
+ # tasks, and zero for non-tier-1. This sorts tier-{2,3}
+ # builds below tier-1 in the index.
+ 'by-tier',
+
+ # Rank is given as an integer constant (e.g. zero to make
+ # sure a task is last in the index).
+ int,
+
+ # Rank is equal to the timestamp of the build_date. This
+ # option can be used to override the 'by-tier' behavior
+ # for non-tier-1 tasks.
+ 'build_date',
+ ),
+ },
+
+ # The `run_on_projects` attribute, defaulting to "all". This dictates the
+ # projects on which this task should be included in the target task set.
+ # See the attributes documentation for details.
+ Optional('run-on-projects'): [basestring],
+
+    # If the task can be coalesced, this is the name used in the coalesce key;
+    # the project, etc. will be added automatically. Note that try (level 1)
+ # tasks are never coalesced
+ Optional('coalesce-name'): basestring,
+
+ # the provisioner-id/worker-type for the task. The following parameters will
+ # be substituted in this string:
+ # {level} -- the scm level of this push
+ 'worker-type': basestring,
+
+ # information specific to the worker implementation that will run this task
+ 'worker': Any({
+ Required('implementation'): Any('docker-worker', 'docker-engine'),
+
+ # For tasks that will run in docker-worker or docker-engine, this is the
+ # name of the docker image or in-tree docker image to run the task in. If
+ # in-tree, then a dependency will be created automatically. This is
+ # generally `desktop-test`, or an image that acts an awful lot like it.
+ Required('docker-image'): Any(
+ # a raw Docker image path (repo/image:tag)
+ basestring,
+ # an in-tree generated docker image (from `testing/docker/<name>`)
+ {'in-tree': basestring}
+ ),
+
+ # worker features that should be enabled
+ Required('relengapi-proxy', default=False): bool,
+ Required('chain-of-trust', default=False): bool,
+ Required('taskcluster-proxy', default=False): bool,
+ Required('allow-ptrace', default=False): bool,
+ Required('loopback-video', default=False): bool,
+ Required('loopback-audio', default=False): bool,
+
+ # caches to set up for the task
+ Optional('caches'): [{
+ # only one type is supported by any of the workers right now
+ 'type': 'persistent',
+
+ # name of the cache, allowing re-use by subsequent tasks naming the
+ # same cache
+ 'name': basestring,
+
+ # location in the task image where the cache will be mounted
+ 'mount-point': basestring,
+ }],
+
+ # artifacts to extract from the task image after completion
+ Optional('artifacts'): [{
+ # type of artifact -- simple file, or recursive directory
+ 'type': Any('file', 'directory'),
+
+ # task image path from which to read artifact
+ 'path': basestring,
+
+ # name of the produced artifact (root of the names for
+ # type=directory)
+ 'name': basestring,
+ }],
+
+ # environment variables
+ Required('env', default={}): {basestring: taskref_or_string},
+
+ # the command to run
+ 'command': [taskref_or_string],
+
+ # the maximum time to run, in seconds
+ 'max-run-time': int,
+
+ # the exit status code that indicates the task should be retried
+ Optional('retry-exit-status'): int,
+
+ }, {
+ Required('implementation'): 'generic-worker',
+
+ # command is a list of commands to run, sequentially
+ 'command': [taskref_or_string],
+
+ # artifacts to extract from the task image after completion; note that artifacts
+ # for the generic worker cannot have names
+ Optional('artifacts'): [{
+ # type of artifact -- simple file, or recursive directory
+ 'type': Any('file', 'directory'),
+
+ # task image path from which to read artifact
+ 'path': basestring,
+ }],
+
+ # directories and/or files to be mounted
+ Optional('mounts'): [{
+ # a unique name for the cache volume
+ 'cache-name': basestring,
+
+ # task image path for the cache
+ 'path': basestring,
+ }],
+
+ # environment variables
+ Required('env', default={}): {basestring: taskref_or_string},
+
+ # the maximum time to run, in seconds
+ 'max-run-time': int,
+
+ # os user groups for test task workers
+ Optional('os-groups', default=[]): [basestring],
+ }, {
+ Required('implementation'): 'buildbot-bridge',
+
+ # see
+ # https://github.com/mozilla/buildbot-bridge/blob/master/bbb/schemas/payload.yml
+ 'buildername': basestring,
+ 'sourcestamp': {
+ 'branch': basestring,
+ Optional('revision'): basestring,
+ Optional('repository'): basestring,
+ Optional('project'): basestring,
+ },
+ 'properties': {
+ 'product': basestring,
+ Extra: basestring, # additional properties are allowed
+ },
+ }, {
+ 'implementation': 'macosx-engine',
+
+ # A link for an executable to download
+ Optional('link'): basestring,
+
+ # the command to run
+ Required('command'): [taskref_or_string],
+
+ # environment variables
+ Optional('env'): {basestring: taskref_or_string},
+
+ # artifacts to extract from the task image after completion
+ Optional('artifacts'): [{
+ # type of artifact -- simple file, or recursive directory
+ Required('type'): Any('file', 'directory'),
+
+ # task image path from which to read artifact
+ Required('path'): basestring,
+
+ # name of the produced artifact (root of the names for
+ # type=directory)
+ Required('name'): basestring,
+ }],
+ }),
+
+ # The "when" section contains descriptions of the circumstances
+ # under which this task can be "optimized", that is, left out of the
+ # task graph because it is unnecessary.
+ Optional('when'): Any({
+ # This task only needs to be run if a file matching one of the given
+ # patterns has changed in the push. The patterns use the mozpack
+ # match function (python/mozbuild/mozpack/path.py).
+ Optional('files-changed'): [basestring],
+ }),
+})
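+
+# A minimal task description accepted by this schema (illustrative values):
+#
+#   label: build-linux64/opt
+#   description: Linux64 opt build
+#   worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+#   worker:
+#       implementation: docker-worker
+#       docker-image: {in-tree: desktop-build}
+#       command: ['/bin/bash', '-c', 'true']
+#       max-run-time: 3600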
+
+GROUP_NAMES = {
+ 'tc': 'Executed by TaskCluster',
+ 'tc-e10s': 'Executed by TaskCluster with e10s',
+ 'tc-Fxfn-l': 'Firefox functional tests (local) executed by TaskCluster',
+ 'tc-Fxfn-l-e10s': 'Firefox functional tests (local) executed by TaskCluster with e10s',
+ 'tc-Fxfn-r': 'Firefox functional tests (remote) executed by TaskCluster',
+ 'tc-Fxfn-r-e10s': 'Firefox functional tests (remote) executed by TaskCluster with e10s',
+ 'tc-M': 'Mochitests executed by TaskCluster',
+ 'tc-M-e10s': 'Mochitests executed by TaskCluster with e10s',
+ 'tc-R': 'Reftests executed by TaskCluster',
+ 'tc-R-e10s': 'Reftests executed by TaskCluster with e10s',
+ 'tc-VP': 'VideoPuppeteer tests executed by TaskCluster',
+ 'tc-W': 'Web platform tests executed by TaskCluster',
+ 'tc-W-e10s': 'Web platform tests executed by TaskCluster with e10s',
+ 'tc-X': 'Xpcshell tests executed by TaskCluster',
+ 'tc-X-e10s': 'Xpcshell tests executed by TaskCluster with e10s',
+ 'Aries': 'Aries Device Image',
+ 'Nexus 5-L': 'Nexus 5-L Device Image',
+ 'Cc': 'Toolchain builds',
+ 'SM-tc': 'Spidermonkey builds',
+}
+UNKNOWN_GROUP_NAME = "Treeherder group {} has no name; add it to " + __file__
+
+BUILDBOT_ROUTE_TEMPLATES = [
+ "index.buildbot.branches.{project}.{job-name-buildbot}",
+ "index.buildbot.revisions.{head_rev}.{project}.{job-name-buildbot}",
+]
+
+V2_ROUTE_TEMPLATES = [
+ "index.gecko.v2.{project}.latest.{product}.{job-name-gecko-v2}",
+ "index.gecko.v2.{project}.pushdate.{build_date_long}.{product}.{job-name-gecko-v2}",
+ "index.gecko.v2.{project}.revision.{head_rev}.{product}.{job-name-gecko-v2}",
+]
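+
+# For illustration, the first template above might expand to something like
+# "index.gecko.v2.mozilla-central.latest.firefox.linux64-opt" (hypothetical
+# project, product, and job-name values).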
+
+# the roots of the treeherder routes, keyed by treeherder environment
+TREEHERDER_ROUTE_ROOTS = {
+ 'production': 'tc-treeherder',
+ 'staging': 'tc-treeherder-stage',
+}
+
+COALESCE_KEY = 'builds.{project}.{name}'
+
+# define a collection of payload builders, depending on the worker implementation
+payload_builders = {}
+
+
+def payload_builder(name):
+ def wrap(func):
+ payload_builders[name] = func
+ return func
+ return wrap
+
+
+@payload_builder('docker-worker')
+def build_docker_worker_payload(config, task, task_def):
+ worker = task['worker']
+
+ image = worker['docker-image']
+ if isinstance(image, dict):
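+        # e.g. a worker specifying {'in-tree': 'desktop-test'} gains a
+        # dependency on the 'build-docker-image-desktop-test' task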
+ docker_image_task = 'build-docker-image-' + image['in-tree']
+ task.setdefault('dependencies', {})['docker-image'] = docker_image_task
+ image = {
+ "path": "public/image.tar.zst",
+ "taskId": {"task-reference": "<docker-image>"},
+ "type": "task-image",
+ }
+
+ features = {}
+
+ if worker.get('relengapi-proxy'):
+ features['relengAPIProxy'] = True
+
+ if worker.get('taskcluster-proxy'):
+ features['taskclusterProxy'] = True
+
+ if worker.get('allow-ptrace'):
+ features['allowPtrace'] = True
+ task_def['scopes'].append('docker-worker:feature:allowPtrace')
+
+ if worker.get('chain-of-trust'):
+ features['chainOfTrust'] = True
+
+ capabilities = {}
+
+ for lo in 'audio', 'video':
+ if worker.get('loopback-' + lo):
+ capitalized = 'loopback' + lo.capitalize()
+ devices = capabilities.setdefault('devices', {})
+ devices[capitalized] = True
+ task_def['scopes'].append('docker-worker:capability:device:' + capitalized)
+
+ task_def['payload'] = payload = {
+ 'command': worker['command'],
+ 'image': image,
+ 'env': worker['env'],
+ }
+
+ if 'max-run-time' in worker:
+ payload['maxRunTime'] = worker['max-run-time']
+
+ if 'retry-exit-status' in worker:
+ payload['onExitStatus'] = {'retry': [worker['retry-exit-status']]}
+
+ if 'artifacts' in worker:
+ artifacts = {}
+ for artifact in worker['artifacts']:
+ artifacts[artifact['name']] = {
+ 'path': artifact['path'],
+ 'type': artifact['type'],
+ 'expires': task_def['expires'], # always expire with the task
+ }
+ payload['artifacts'] = artifacts
+
+ if 'caches' in worker:
+ caches = {}
+ for cache in worker['caches']:
+ caches[cache['name']] = cache['mount-point']
+ task_def['scopes'].append('docker-worker:cache:' + cache['name'])
+ payload['cache'] = caches
+
+ if features:
+ payload['features'] = features
+ if capabilities:
+ payload['capabilities'] = capabilities
+
+ # coalesce / superseding
+ if 'coalesce-name' in task and int(config.params['level']) > 1:
+ key = COALESCE_KEY.format(
+ project=config.params['project'],
+ name=task['coalesce-name'])
+ payload['supersederUrl'] = "https://coalesce.mozilla-releng.net/v1/list/" + key
+
+
+@payload_builder('generic-worker')
+def build_generic_worker_payload(config, task, task_def):
+ worker = task['worker']
+
+ artifacts = []
+
+ for artifact in worker['artifacts']:
+ artifacts.append({
+ 'path': artifact['path'],
+ 'type': artifact['type'],
+ 'expires': task_def['expires'], # always expire with the task
+ })
+
+ mounts = []
+
+ for mount in worker.get('mounts', []):
+ mounts.append({
+ 'cacheName': mount['cache-name'],
+ 'directory': mount['path']
+ })
+
+ task_def['payload'] = {
+ 'command': worker['command'],
+ 'artifacts': artifacts,
+ 'env': worker.get('env', {}),
+ 'mounts': mounts,
+ 'maxRunTime': worker['max-run-time'],
+ 'osGroups': worker.get('os-groups', []),
+ }
+
+ if 'retry-exit-status' in worker:
+ raise Exception("retry-exit-status not supported in generic-worker")
+
+
+@payload_builder('macosx-engine')
+def build_macosx_engine_payload(config, task, task_def):
+ worker = task['worker']
+ artifacts = map(lambda artifact: {
+ 'name': artifact['name'],
+ 'path': artifact['path'],
+ 'type': artifact['type'],
+ 'expires': task_def['expires'],
+ }, worker['artifacts'])
+
+ task_def['payload'] = {
+ 'link': worker['link'],
+ 'command': worker['command'],
+ 'env': worker['env'],
+ 'artifacts': artifacts,
+ }
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def validate(config, tasks):
+ for task in tasks:
+ yield validate_schema(
+ task_description_schema, task,
+ "In task {!r}:".format(task.get('label', '?no-label?')))
+
+
+@transforms.add
+def add_index_routes(config, tasks):
+ for task in tasks:
+ index = task.get('index')
+ routes = task.setdefault('routes', [])
+
+ if not index:
+ yield task
+ continue
+
+ job_name = index['job-name']
+ # unpack the v2 name to v1 and buildbot names
+ if isinstance(job_name, basestring):
+ base_name, type_name = job_name.rsplit('-', 1)
+ job_name = {
+ 'buildbot': base_name,
+ 'gecko-v2': '{}-{}'.format(base_name, type_name),
+ }
+
+ if job_name['gecko-v2'] not in JOB_NAME_WHITELIST:
+ raise Exception(JOB_NAME_WHITELIST_ERROR.format(job_name['gecko-v2']))
+
+ subs = config.params.copy()
+ for n in job_name:
+ subs['job-name-' + n] = job_name[n]
+ subs['build_date_long'] = time.strftime("%Y.%m.%d.%Y%m%d%H%M%S",
+ time.gmtime(config.params['build_date']))
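+        # e.g. a build_date of 1484438400 yields '2017.01.15.20170115000000'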
+ subs['product'] = index['product']
+
+ if 'buildbot' in job_name:
+ for tpl in BUILDBOT_ROUTE_TEMPLATES:
+ routes.append(tpl.format(**subs))
+ if 'gecko-v2' in job_name:
+ for tpl in V2_ROUTE_TEMPLATES:
+ routes.append(tpl.format(**subs))
+
+ # The default behavior is to rank tasks according to their tier
+ extra_index = task.setdefault('extra', {}).setdefault('index', {})
+ rank = index.get('rank', 'by-tier')
+
+ if rank == 'by-tier':
+            # rank is zero for non-tier-1 tasks and based on the build date
+            # for others; this sorts tier-{2,3} builds below tier-1 in the
+            # index
+ tier = task.get('treeherder', {}).get('tier', 3)
+ extra_index['rank'] = 0 if tier > 1 else int(config.params['build_date'])
+ elif rank == 'build_date':
+ extra_index['rank'] = int(config.params['build_date'])
+ else:
+ extra_index['rank'] = rank
+
+ del task['index']
+ yield task
+
+
+@transforms.add
+def build_task(config, tasks):
+ for task in tasks:
+ worker_type = task['worker-type'].format(level=str(config.params['level']))
+ provisioner_id, worker_type = worker_type.split('/', 1)
+
+ routes = task.get('routes', [])
+ scopes = task.get('scopes', [])
+
+ # set up extra
+ extra = task.get('extra', {})
+ task_th = task.get('treeherder')
+ if task_th:
+ extra['treeherderEnv'] = task_th['environments']
+
+ treeherder = extra.setdefault('treeherder', {})
+
+ machine_platform, collection = task_th['platform'].split('/', 1)
+ treeherder['machine'] = {'platform': machine_platform}
+ treeherder['collection'] = {collection: True}
+
+ groupSymbol, symbol = split_symbol(task_th['symbol'])
+ if groupSymbol != '?':
+ treeherder['groupSymbol'] = groupSymbol
+ if groupSymbol not in GROUP_NAMES:
+ raise Exception(UNKNOWN_GROUP_NAME.format(groupSymbol))
+ treeherder['groupName'] = GROUP_NAMES[groupSymbol]
+ treeherder['symbol'] = symbol
+ treeherder['jobKind'] = task_th['kind']
+ treeherder['tier'] = task_th['tier']
+
+ routes.extend([
+ '{}.v2.{}.{}.{}'.format(TREEHERDER_ROUTE_ROOTS[env],
+ config.params['project'],
+ config.params['head_rev'],
+ config.params['pushlog_id'])
+ for env in task_th['environments']
+ ])
+
+ if 'expires-after' not in task:
+ task['expires-after'] = '28 days' if config.params['project'] == 'try' else '1 year'
+
+ if 'deadline-after' not in task:
+ task['deadline-after'] = '1 day'
+
+ if 'coalesce-name' in task and int(config.params['level']) > 1:
+ key = COALESCE_KEY.format(
+ project=config.params['project'],
+ name=task['coalesce-name'])
+ routes.append('coalesce.v1.' + key)
+
+ task_def = {
+ 'provisionerId': provisioner_id,
+ 'workerType': worker_type,
+ 'routes': routes,
+ 'created': {'relative-datestamp': '0 seconds'},
+ 'deadline': {'relative-datestamp': task['deadline-after']},
+ 'expires': {'relative-datestamp': task['expires-after']},
+ 'scopes': scopes,
+ 'metadata': {
+ 'description': task['description'],
+ 'name': task['label'],
+ 'owner': config.params['owner'],
+ 'source': '{}/file/{}/{}'.format(
+ config.params['head_repository'],
+ config.params['head_rev'],
+ config.path),
+ },
+ 'extra': extra,
+ 'tags': {'createdForUser': config.params['owner']},
+ }
+
+ # add the payload and adjust anything else as required (e.g., scopes)
+ payload_builders[task['worker']['implementation']](config, task, task_def)
+
+ attributes = task.get('attributes', {})
+ attributes['run_on_projects'] = task.get('run-on-projects', ['all'])
+
+ yield {
+ 'label': task['label'],
+ 'task': task_def,
+ 'dependencies': task.get('dependencies', {}),
+ 'attributes': attributes,
+ 'when': task.get('when', {}),
+ }
+
+
+# Check that the v2 route templates match those used by Mozharness. This can
+# go away once Mozharness builds are no longer performed in Buildbot, and the
+# Mozharness code referencing routes.json is deleted.
+def check_v2_routes():
+ with open("testing/mozharness/configs/routes.json", "rb") as f:
+ routes_json = json.load(f)
+
+ # we only deal with the 'routes' key here
+ routes = routes_json['routes']
+
+ # we use different variables than mozharness
+ for mh, tg in [
+ ('{index}', 'index'),
+ ('{build_product}', '{product}'),
+ ('{build_name}-{build_type}', '{job-name-gecko-v2}'),
+ ('{year}.{month}.{day}.{pushdate}', '{build_date_long}')]:
+ routes = [r.replace(mh, tg) for r in routes]
+
+ if sorted(routes) != sorted(V2_ROUTE_TEMPLATES):
+ raise Exception("V2_ROUTE_TEMPLATES does not match Mozharness's routes.json: "
+ "%s vs %s" % (V2_ROUTE_TEMPLATES, routes))
+
+check_v2_routes()
diff --git a/taskcluster/taskgraph/transforms/tests/__init__.py b/taskcluster/taskgraph/transforms/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/tests/__init__.py
diff --git a/taskcluster/taskgraph/transforms/tests/all_kinds.py b/taskcluster/taskgraph/transforms/tests/all_kinds.py
new file mode 100644
index 000000000..f2aa1f841
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/tests/all_kinds.py
@@ -0,0 +1,137 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Changes here apply to all tests, regardless of kind.
+
+This is a great place for:
+
+ * Applying rules based on platform, project, etc. that should span kinds
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.util.treeherder import split_symbol, join_symbol
+from taskgraph.transforms.base import TransformSequence, get_keyed_by
+
+import copy
+
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def set_worker_implementation(config, tests):
+ """Set the worker implementation based on the test platform."""
+ for test in tests:
+ if test['test-platform'].startswith('win'):
+ test['worker-implementation'] = 'generic-worker'
+ elif test['test-platform'].startswith('macosx'):
+ test['worker-implementation'] = 'macosx-engine'
+ else:
+ test['worker-implementation'] = 'docker-worker'
+ yield test
+
+
+@transforms.add
+def set_tier(config, tests):
+ """Set the tier based on policy for all test descriptions that do not
+ specify a tier otherwise."""
+ for test in tests:
+ # only override if not set for the test
+ if 'tier' not in test:
+ if test['test-platform'] in ['linux64/debug',
+ 'linux64-asan/opt',
+ 'android-4.3-arm7-api-15/debug',
+ 'android-x86/opt']:
+ test['tier'] = 1
+ else:
+ test['tier'] = 2
+ yield test
+
+
+@transforms.add
+def set_expires_after(config, tests):
+ """Try jobs expire after 2 weeks; everything else lasts 1 year. This helps
+ keep storage costs low."""
+ for test in tests:
+ if 'expires-after' not in test:
+ if config.params['project'] == 'try':
+ test['expires-after'] = "14 days"
+ else:
+ test['expires-after'] = "1 year"
+ yield test
+
+
+@transforms.add
+def set_download_symbols(config, tests):
+ """In general, we download symbols immediately for debug builds, but only
+    on demand for everything else. ASAN builds shouldn't download symbols,
+    since they don't produce symbol zips; see bug 1283879."""
+ for test in tests:
+ if test['test-platform'].split('/')[-1] == 'debug':
+ test['mozharness']['download-symbols'] = True
+ elif test['build-platform'] == 'linux64-asan/opt':
+ if 'download-symbols' in test['mozharness']:
+ del test['mozharness']['download-symbols']
+ else:
+ test['mozharness']['download-symbols'] = 'ondemand'
+ yield test
+
+
+@transforms.add
+def resolve_keyed_by(config, tests):
+ """Resolve fields that can be keyed by platform, etc."""
+ fields = [
+ 'instance-size',
+ 'max-run-time',
+ 'chunks',
+ 'e10s',
+ 'suite',
+ 'run-on-projects',
+ ]
+ for test in tests:
+ for field in fields:
+ test[field] = get_keyed_by(item=test, field=field, item_name=test['test-name'])
+ test['mozharness']['config'] = get_keyed_by(item=test,
+ field='mozharness',
+ subfield='config',
+ item_name=test['test-name'])
+ test['mozharness']['extra-options'] = get_keyed_by(item=test,
+ field='mozharness',
+ subfield='extra-options',
+ item_name=test['test-name'])
+ yield test
+
+
+@transforms.add
+def split_chunks(config, tests):
+ """Based on the 'chunks' key, split tests up into chunks by duplicating
+ them and assigning 'this-chunk' appropriately and updating the treeherder
+ symbol."""
+ for test in tests:
+ if test['chunks'] == 1:
+ test['this-chunk'] = 1
+ yield test
+ continue
+
+ for this_chunk in range(1, test['chunks'] + 1):
+ # copy the test and update with the chunk number
+ chunked = copy.deepcopy(test)
+ chunked['this-chunk'] = this_chunk
+
+ # add the chunk number to the TH symbol
+ group, symbol = split_symbol(chunked['treeherder-symbol'])
+ symbol += str(this_chunk)
+ chunked['treeherder-symbol'] = join_symbol(group, symbol)
+
+ yield chunked
+
+
+@transforms.add
+def set_retry_exit_status(config, tests):
+ """Set the retry exit status to TBPL_RETRY, the value returned by mozharness
+ scripts to indicate a transient failure that should be retried."""
+ for test in tests:
+ test['retry-exit-status'] = 4
+ yield test
diff --git a/taskcluster/taskgraph/transforms/tests/android_test.py b/taskcluster/taskgraph/transforms/tests/android_test.py
new file mode 100644
index 000000000..7c13b16f5
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/tests/android_test.py
@@ -0,0 +1,42 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+These transforms are specific to the android-test kind, and apply defaults to
+the test descriptions appropriate to that kind.
+
+Both the input to and output from these transforms must conform to
+`taskgraph.transforms.tests.test:test_schema`.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def set_defaults(config, tests):
+ for test in tests:
+ # all Android test tasks download internal objects from tooltool
+ test['mozharness']['tooltool-downloads'] = True
+ test['mozharness']['build-artifact-name'] = 'public/build/target.apk'
+ test['mozharness']['actions'] = ['get-secrets']
+ yield test
+
+
+@transforms.add
+def set_treeherder_machine_platform(config, tests):
+ """Set the appropriate task.extra.treeherder.machine.platform"""
+    # The build names for these build platforms have partially evolved over
+    # the years. This is temporary until we can clean up the handling of
+    # platforms.
+ translation = {
+ 'android-api-15/debug': 'android-4-3-armv7-api15/debug',
+ 'android-api-15/opt': 'android-4-3-armv7-api15/opt',
+ 'android-x86/opt': 'android-4-2-x86/opt',
+ }
+ for test in tests:
+ build_platform = test['build-platform']
+ test['treeherder-machine-platform'] = translation.get(build_platform, build_platform)
+ yield test
diff --git a/taskcluster/taskgraph/transforms/tests/desktop_test.py b/taskcluster/taskgraph/transforms/tests/desktop_test.py
new file mode 100644
index 000000000..44a907903
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/tests/desktop_test.py
@@ -0,0 +1,118 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+These transforms are specific to the desktop-test kind, and apply defaults to
+the test descriptions appropriate to that kind.
+
+Both the input to and output from these transforms must conform to
+`taskgraph.transforms.tests.test:test_schema`.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+from taskgraph.transforms.base import TransformSequence, get_keyed_by
+from taskgraph.util.treeherder import split_symbol, join_symbol
+
+import copy
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def set_defaults(config, tests):
+ for test in tests:
+ build_platform = test['build-platform']
+ if build_platform.startswith('macosx'):
+ target = 'target.dmg'
+ else:
+ target = 'target.tar.bz2'
+ test['mozharness']['build-artifact-name'] = 'public/build/' + target
+ # all desktop tests want to run the bits that require node
+ test['mozharness']['set-moz-node-path'] = True
+ yield test
+
+
+@transforms.add
+def set_treeherder_machine_platform(config, tests):
+ """Set the appropriate task.extra.treeherder.machine.platform"""
+    # Linux64 build platforms for asan and pgo, and the macosx64 platforms,
+    # are specified differently to treeherder. This is temporary until we can
+    # clean up the handling of platforms.
+ translation = {
+ 'linux64-asan/opt': 'linux64/asan',
+ 'linux64-pgo/opt': 'linux64/pgo',
+ 'macosx64/debug': 'osx-10-10/debug',
+ 'macosx64/opt': 'osx-10-10/opt',
+ }
+ for test in tests:
+ build_platform = test['build-platform']
+ test_platform = test['test-platform']
+ test['treeherder-machine-platform'] = translation.get(build_platform, test_platform)
+ yield test
+
+
+@transforms.add
+def set_asan_docker_image(config, tests):
+ """Set the appropriate task.extra.treeherder.docker-image"""
+    # Linux64-asan has many leaks when running mochitest-media jobs
+    # on Ubuntu 16.04; please remove this when bug 1289209 is resolved.
+ for test in tests:
+ if test['suite'] == 'mochitest/mochitest-media' and \
+ test['build-platform'] == 'linux64-asan/opt':
+ test['docker-image'] = {"in-tree": "desktop-test"}
+ yield test
+
+
+@transforms.add
+def split_e10s(config, tests):
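+    # A test whose e10s value resolves to 'both' yields two tasks: one as-is,
+    # and one with '-e10s' appended to its test name and treeherder group
+    # (e.g. mochitest-media also yields mochitest-media-e10s).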
+ for test in tests:
+ e10s = get_keyed_by(item=test, field='e10s',
+ item_name=test['test-name'])
+ test.setdefault('attributes', {})
+ test['e10s'] = False
+ test['attributes']['e10s'] = False
+
+ if e10s == 'both':
+ yield test
+ test = copy.deepcopy(test)
+ e10s = True
+ if e10s:
+ test['test-name'] += '-e10s'
+ test['e10s'] = True
+ test['attributes']['e10s'] = True
+ group, symbol = split_symbol(test['treeherder-symbol'])
+ if group != '?':
+ group += '-e10s'
+ test['treeherder-symbol'] = join_symbol(group, symbol)
+ test['mozharness'].setdefault('extra-options', []).append('--e10s')
+ yield test
+
+
+@transforms.add
+def allow_software_gl_layers(config, tests):
+ for test in tests:
+
+ # since this value defaults to true, but is not applicable on windows,
+        # it's overridden for that platform here.
+ allow = not test['test-platform'].startswith('win') \
+ and get_keyed_by(item=test, field='allow-software-gl-layers',
+ item_name=test['test-name'])
+ if allow:
+ assert test['instance-size'] != 'legacy',\
+ 'Software GL layers on a legacy instance is disallowed (bug 1296086).'
+
+ # This should be set always once bug 1296086 is resolved.
+ test['mozharness'].setdefault('extra-options', [])\
+ .append("--allow-software-gl-layers")
+
+ yield test
+
+
+@transforms.add
+def add_os_groups(config, tests):
+ for test in tests:
+ if test['test-platform'].startswith('win'):
+ groups = get_keyed_by(item=test, field='os-groups', item_name=test['test-name'])
+ if groups:
+ test['os-groups'] = groups
+ yield test
diff --git a/taskcluster/taskgraph/transforms/tests/make_task_description.py b/taskcluster/taskgraph/transforms/tests/make_task_description.py
new file mode 100644
index 000000000..fc3f94893
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/tests/make_task_description.py
@@ -0,0 +1,445 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+These transforms construct a task description to run the given test, based on a
+test description. The implementation here is shared among all test kinds, but
+contains specific support for how we run tests in Gecko (via mozharness,
+invoked in particular ways).
+
+This is a good place to translate a test-description option such as
+`single-core: true` to the implementation of that option in a task description
+(worker options, mozharness commandline, environment variables, etc.)
+
+The test description should be fully formed by the time it reaches these
+transforms, and these transforms should not embody any specific knowledge about
+what should run where. This is the wrong place for special-casing platforms,
+for example -- use `all_kinds.py` instead.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.transforms.job.common import (
+ docker_worker_support_vcs_checkout,
+)
+
+import logging
+import os.path
+
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+WORKER_TYPE = {
+ # default worker types keyed by instance-size
+ 'large': 'aws-provisioner-v1/gecko-t-linux-large',
+ 'xlarge': 'aws-provisioner-v1/gecko-t-linux-xlarge',
+ 'legacy': 'aws-provisioner-v1/gecko-t-linux-medium',
+ 'default': 'aws-provisioner-v1/gecko-t-linux-large',
+ # windows worker types keyed by test-platform
+ 'windows7-32-vm': 'aws-provisioner-v1/gecko-t-win7-32',
+ 'windows7-32': 'aws-provisioner-v1/gecko-t-win7-32-gpu',
+ 'windows10-64-vm': 'aws-provisioner-v1/gecko-t-win10-64',
+ 'windows10-64': 'aws-provisioner-v1/gecko-t-win10-64-gpu'
+}
+
+ARTIFACTS = [
+ # (artifact name prefix, in-image path)
+ ("public/logs/", "build/upload/logs/"),
+ ("public/test", "artifacts/"),
+ ("public/test_info/", "build/blobber_upload_dir/"),
+]
+
+logger = logging.getLogger(__name__)
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def make_task_description(config, tests):
+ """Convert *test* descriptions to *task* descriptions (input to
+ taskgraph.transforms.task)"""
+
+ for test in tests:
+ label = '{}-{}-{}'.format(config.kind, test['test-platform'], test['test-name'])
+ if test['chunks'] > 1:
+ label += '-{}'.format(test['this-chunk'])
+
+ build_label = test['build-label']
+
+ unittest_try_name = test.get('unittest-try-name', test['test-name'])
+
+ attr_build_platform, attr_build_type = test['build-platform'].split('/', 1)
+
+ suite = test['suite']
+ if '/' in suite:
+ suite, flavor = suite.split('/', 1)
+ else:
+ flavor = suite
+
+ attributes = test.get('attributes', {})
+ attributes.update({
+ 'build_platform': attr_build_platform,
+ 'build_type': attr_build_type,
+ # only keep the first portion of the test platform
+ 'test_platform': test['test-platform'].split('/')[0],
+ 'test_chunk': str(test['this-chunk']),
+ 'unittest_suite': suite,
+ 'unittest_flavor': flavor,
+ 'unittest_try_name': unittest_try_name,
+ })
+
+ taskdesc = {}
+ taskdesc['label'] = label
+ taskdesc['description'] = test['description']
+ taskdesc['attributes'] = attributes
+ taskdesc['dependencies'] = {'build': build_label}
+ taskdesc['deadline-after'] = '1 day'
+ taskdesc['expires-after'] = test['expires-after']
+ taskdesc['routes'] = []
+ taskdesc['run-on-projects'] = test.get('run-on-projects', ['all'])
+ taskdesc['scopes'] = []
+ taskdesc['extra'] = {
+ 'chunks': {
+ 'current': test['this-chunk'],
+ 'total': test['chunks'],
+ },
+ 'suite': {
+ 'name': suite,
+ 'flavor': flavor,
+ },
+ }
+ taskdesc['treeherder'] = {
+ 'symbol': test['treeherder-symbol'],
+ 'kind': 'test',
+ 'tier': test['tier'],
+ 'platform': test.get('treeherder-machine-platform', test['build-platform']),
+ }
+
+ # the remainder (the worker-type and worker) differs depending on the
+ # worker implementation
+ worker_setup_functions[test['worker-implementation']](config, test, taskdesc)
+
+ # yield only the task description, discarding the test description
+ yield taskdesc
+
+
+worker_setup_functions = {}
+
+
+def worker_setup_function(name):
+ def wrap(func):
+ worker_setup_functions[name] = func
+ return func
+ return wrap
+
+
+@worker_setup_function("docker-engine")
+@worker_setup_function("docker-worker")
+def docker_worker_setup(config, test, taskdesc):
+
+ artifacts = [
+ # (artifact name prefix, in-image path)
+ ("public/logs/", "/home/worker/workspace/build/upload/logs/"),
+ ("public/test", "/home/worker/artifacts/"),
+ ("public/test_info/", "/home/worker/workspace/build/blobber_upload_dir/"),
+ ]
+ mozharness = test['mozharness']
+
+ installer_url = ARTIFACT_URL.format('<build>', mozharness['build-artifact-name'])
+ test_packages_url = ARTIFACT_URL.format('<build>',
+ 'public/build/target.test_packages.json')
+ mozharness_url = ARTIFACT_URL.format('<build>',
+ 'public/build/mozharness.zip')
+
+ taskdesc['worker-type'] = WORKER_TYPE[test['instance-size']]
+
+ worker = taskdesc['worker'] = {}
+ worker['implementation'] = test['worker-implementation']
+ worker['docker-image'] = test['docker-image']
+
+ worker['allow-ptrace'] = True # required for all tests, for crashreporter
+ worker['relengapi-proxy'] = False # but maybe enabled for tooltool below
+ worker['loopback-video'] = test['loopback-video']
+ worker['loopback-audio'] = test['loopback-audio']
+ worker['max-run-time'] = test['max-run-time']
+ worker['retry-exit-status'] = test['retry-exit-status']
+
+ worker['artifacts'] = [{
+ 'name': prefix,
+ 'path': os.path.join('/home/worker/workspace', path),
+ 'type': 'directory',
+ } for (prefix, path) in artifacts]
+
+ worker['caches'] = [{
+ 'type': 'persistent',
+ 'name': 'level-{}-{}-test-workspace'.format(
+ config.params['level'], config.params['project']),
+ 'mount-point': "/home/worker/workspace",
+ }]
+
+ env = worker['env'] = {
+ 'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
+ 'MOZHARNESS_SCRIPT': mozharness['script'],
+ 'MOZILLA_BUILD_URL': {'task-reference': installer_url},
+ 'NEED_PULSEAUDIO': 'true',
+ 'NEED_WINDOW_MANAGER': 'true',
+ }
+
+ if mozharness['set-moz-node-path']:
+ env['MOZ_NODE_PATH'] = '/usr/local/bin/node'
+
+ if 'actions' in mozharness:
+ env['MOZHARNESS_ACTIONS'] = ' '.join(mozharness['actions'])
+
+ if config.params['project'] == 'try':
+ env['TRY_COMMIT_MSG'] = config.params['message']
+
+ # handle some of the mozharness-specific options
+
+ if mozharness['tooltool-downloads']:
+ worker['relengapi-proxy'] = True
+ worker['caches'].append({
+ 'type': 'persistent',
+ 'name': 'tooltool-cache',
+ 'mount-point': '/home/worker/tooltool-cache',
+ })
+ taskdesc['scopes'].extend([
+ 'docker-worker:relengapi-proxy:tooltool.download.internal',
+ 'docker-worker:relengapi-proxy:tooltool.download.public',
+ ])
+
+ # assemble the command line
+ command = [
+ '/home/worker/bin/run-task',
+ # The workspace cache/volume is default owned by root:root.
+ '--chown', '/home/worker/workspace',
+ ]
+
+ # Support vcs checkouts regardless of whether the task runs from
+ # source or not in case it is needed on an interactive loaner.
+ docker_worker_support_vcs_checkout(config, test, taskdesc)
+
+ # If we have a source checkout, run mozharness from it instead of
+ # downloading a zip file with the same content.
+ if test['checkout']:
+ command.extend(['--vcs-checkout', '/home/worker/checkouts/gecko'])
+ env['MOZHARNESS_PATH'] = '/home/worker/checkouts/gecko/testing/mozharness'
+ else:
+ env['MOZHARNESS_URL'] = {'task-reference': mozharness_url}
+
+ command.extend([
+ '--',
+ '/home/worker/bin/test-linux.sh',
+ ])
+
+ if mozharness.get('no-read-buildbot-config'):
+ command.append("--no-read-buildbot-config")
+ command.extend([
+ {"task-reference": "--installer-url=" + installer_url},
+ {"task-reference": "--test-packages-url=" + test_packages_url},
+ ])
+ command.extend(mozharness.get('extra-options', []))
+
+ # TODO: remove the need for run['chunked']
+ if mozharness.get('chunked') or test['chunks'] > 1:
+ # Implement mozharness['chunking-args'], modifying command in place
+ if mozharness['chunking-args'] == 'this-chunk':
+ command.append('--total-chunk={}'.format(test['chunks']))
+ command.append('--this-chunk={}'.format(test['this-chunk']))
+ elif mozharness['chunking-args'] == 'test-suite-suffix':
+ suffix = mozharness['chunk-suffix'].replace('<CHUNK>', str(test['this-chunk']))
+ for i, c in enumerate(command):
+ if isinstance(c, basestring) and c.startswith('--test-suite'):
+ command[i] += suffix
+
+ if 'download-symbols' in mozharness:
+ download_symbols = mozharness['download-symbols']
+ download_symbols = {True: 'true', False: 'false'}.get(download_symbols, download_symbols)
+ command.append('--download-symbols=' + download_symbols)
+
+ worker['command'] = command
+
+
+def normpath(path):
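+    # convert POSIX separators to Windows ones,
+    # e.g. 'mozharness/scripts/x.py' -> 'mozharness\scripts\x.py'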
+ return path.replace('/', '\\')
+
+
+def get_firefox_version():
+ with open('browser/config/version.txt', 'r') as f:
+ return f.readline().strip()
+
+
+@worker_setup_function('generic-worker')
+def generic_worker_setup(config, test, taskdesc):
+ artifacts = [
+ {
+ 'path': 'public\\logs\\localconfig.json',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\logs\\log_critical.log',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\logs\\log_error.log',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\logs\\log_fatal.log',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\logs\\log_info.log',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\logs\\log_raw.log',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\logs\\log_warning.log',
+ 'type': 'file'
+ },
+ {
+ 'path': 'public\\test_info',
+ 'type': 'directory'
+ }
+ ]
+ mozharness = test['mozharness']
+
+ build_platform = taskdesc['attributes']['build_platform']
+ test_platform = test['test-platform'].split('/')[0]
+
+ target = 'firefox-{}.en-US.{}'.format(get_firefox_version(), build_platform)
+
+ installer_url = ARTIFACT_URL.format(
+ '<build>', 'public/build/{}.zip'.format(target))
+ test_packages_url = ARTIFACT_URL.format(
+ '<build>', 'public/build/{}.test_packages.json'.format(target))
+ mozharness_url = ARTIFACT_URL.format(
+ '<build>', 'public/build/mozharness.zip')
+
+ taskdesc['worker-type'] = WORKER_TYPE[test_platform]
+
+ taskdesc['scopes'].extend(
+ ['generic-worker:os-group:{}'.format(group) for group in test['os-groups']])
+
+ worker = taskdesc['worker'] = {}
+ worker['os-groups'] = test['os-groups']
+ worker['implementation'] = test['worker-implementation']
+ worker['max-run-time'] = test['max-run-time']
+ worker['artifacts'] = artifacts
+
+ env = worker['env'] = {
+ # Bug 1306989
+ 'APPDATA': '%cd%\\AppData\\Roaming',
+ 'LOCALAPPDATA': '%cd%\\AppData\\Local',
+ 'TEMP': '%cd%\\AppData\\Local\\Temp',
+ 'TMP': '%cd%\\AppData\\Local\\Temp',
+ 'USERPROFILE': '%cd%',
+ }
+
+ # assemble the command line
+ mh_command = [
+ 'c:\\mozilla-build\\python\\python.exe',
+ '-u',
+ 'mozharness\\scripts\\' + normpath(mozharness['script'])
+ ]
+ for mh_config in mozharness['config']:
+ mh_command.extend(['--cfg', 'mozharness\\configs\\' + normpath(mh_config)])
+ mh_command.extend(mozharness.get('extra-options', []))
+ if mozharness.get('no-read-buildbot-config'):
+ mh_command.append('--no-read-buildbot-config')
+ mh_command.extend(['--installer-url', installer_url])
+ mh_command.extend(['--test-packages-url', test_packages_url])
+ if mozharness.get('download-symbols'):
+ if isinstance(mozharness['download-symbols'], basestring):
+ mh_command.extend(['--download-symbols', mozharness['download-symbols']])
+ else:
+ mh_command.extend(['--download-symbols', 'true'])
+
+ # TODO: remove the need for run['chunked']
+ if mozharness.get('chunked') or test['chunks'] > 1:
+ # Implement mozharness['chunking-args'], modifying command in place
+ if mozharness['chunking-args'] == 'this-chunk':
+ mh_command.append('--total-chunk={}'.format(test['chunks']))
+ mh_command.append('--this-chunk={}'.format(test['this-chunk']))
+ elif mozharness['chunking-args'] == 'test-suite-suffix':
+ suffix = mozharness['chunk-suffix'].replace('<CHUNK>', str(test['this-chunk']))
+ for i, c in enumerate(mh_command):
+ if isinstance(c, basestring) and c.startswith('--test-suite'):
+ mh_command[i] += suffix
+
+ worker['command'] = [
+ 'mkdir {} {}'.format(env['APPDATA'], env['TMP']),
+ {'task-reference': 'c:\\mozilla-build\\wget\\wget.exe {}'.format(mozharness_url)},
+ 'c:\\mozilla-build\\info-zip\\unzip.exe mozharness.zip',
+ {'task-reference': ' '.join(mh_command)},
+ 'xcopy build\\blobber_upload_dir public\\test_info /e /i',
+ 'copy /y logs\\*.* public\\logs\\'
+ ]
+
+
+@worker_setup_function("macosx-engine")
+def macosx_engine_setup(config, test, taskdesc):
+ mozharness = test['mozharness']
+
+ installer_url = ARTIFACT_URL.format('<build>', mozharness['build-artifact-name'])
+ test_packages_url = ARTIFACT_URL.format('<build>',
+ 'public/build/target.test_packages.json')
+ mozharness_url = ARTIFACT_URL.format('<build>',
+ 'public/build/mozharness.zip')
+
+ # for now we have only 10.10 machines
+ taskdesc['worker-type'] = 'tc-worker-provisioner/gecko-t-osx-10-10'
+
+ worker = taskdesc['worker'] = {}
+ worker['implementation'] = test['worker-implementation']
+
+ worker['artifacts'] = [{
+ 'name': prefix.rstrip('/'),
+ 'path': path.rstrip('/'),
+ 'type': 'directory',
+ } for (prefix, path) in ARTIFACTS]
+
+ worker['env'] = {
+ 'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
+ 'GECKO_HEAD_REV': config.params['head_rev'],
+ 'MOZHARNESS_CONFIG': ' '.join(mozharness['config']),
+ 'MOZHARNESS_SCRIPT': mozharness['script'],
+ 'MOZHARNESS_URL': {'task-reference': mozharness_url},
+ 'MOZILLA_BUILD_URL': {'task-reference': installer_url},
+ }
+
+ # assemble the command line
+
+ worker['link'] = '{}/raw-file/{}/taskcluster/scripts/tester/test-macosx.sh'.format(
+ config.params['head_repository'], config.params['head_rev']
+ )
+
+ command = worker['command'] = ["./test-macosx.sh"]
+ if mozharness.get('no-read-buildbot-config'):
+ command.append("--no-read-buildbot-config")
+ command.extend([
+ {"task-reference": "--installer-url=" + installer_url},
+ {"task-reference": "--test-packages-url=" + test_packages_url},
+ ])
+ if mozharness.get('include-blob-upload-branch'):
+ command.append('--blob-upload-branch=' + config.params['project'])
+ command.extend(mozharness.get('extra-options', []))
+
+ # TODO: remove the need for run['chunked']
+ if mozharness.get('chunked') or test['chunks'] > 1:
+ # Implement mozharness['chunking-args'], modifying command in place
+ if mozharness['chunking-args'] == 'this-chunk':
+ command.append('--total-chunk={}'.format(test['chunks']))
+ command.append('--this-chunk={}'.format(test['this-chunk']))
+ elif mozharness['chunking-args'] == 'test-suite-suffix':
+ suffix = mozharness['chunk-suffix'].replace('<CHUNK>', str(test['this-chunk']))
+ for i, c in enumerate(command):
+ if isinstance(c, basestring) and c.startswith('--test-suite'):
+ command[i] += suffix
+
+ if 'download-symbols' in mozharness:
+ download_symbols = mozharness['download-symbols']
+ download_symbols = {True: 'true', False: 'false'}.get(download_symbols, download_symbols)
+ command.append('--download-symbols=' + download_symbols)
diff --git a/taskcluster/taskgraph/transforms/tests/test_description.py b/taskcluster/taskgraph/transforms/tests/test_description.py
new file mode 100644
index 000000000..1365919fe
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/tests/test_description.py
@@ -0,0 +1,235 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+This file defines the schema for tests -- the things in `tests.yml`. It should
+be run both before and after the kind-specific transforms, to ensure that the
+transforms do not generate invalid tests.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import validate_schema
+from voluptuous import (
+ Any,
+ Optional,
+ Required,
+ Schema,
+)
+
+
+# Schema for a test description
+#
+# *****WARNING*****
+#
+# This is a great place for baffling cruft to accumulate, and that makes
+# everyone move more slowly. Be considerate of your fellow hackers!
+# See the warnings in taskcluster/docs/how-tos.rst
+#
+# *****WARNING*****
+test_description_schema = Schema({
+ # description of the suite, for the task metadata
+ 'description': basestring,
+
+ # test suite name, or <suite>/<flavor>
+ Required('suite'): Any(
+ basestring,
+ {'by-test-platform': {basestring: basestring}},
+ ),
+
+ # the name by which this test suite is addressed in try syntax; defaults to
+ # the test-name
+ Optional('unittest-try-name'): basestring,
+
+ # the symbol, or group(symbol), under which this task should appear in
+ # treeherder.
+ 'treeherder-symbol': basestring,
+
+ # the value to place in task.extra.treeherder.machine.platform; ideally
+ # this is the same as build-platform, and that is the default, but in
+ # practice it's not always a match.
+ Optional('treeherder-machine-platform'): basestring,
+
+ # attributes to appear in the resulting task (later transforms will add the
+ # common attributes)
+ Optional('attributes'): {basestring: object},
+
+ # The `run_on_projects` attribute, defaulting to "all". This dictates the
+ # projects on which this task should be included in the target task set.
+ # See the attributes documentation for details.
+ Optional('run-on-projects', default=['all']): Any(
+ [basestring],
+ {'by-test-platform': {basestring: [basestring]}},
+ ),
+
+ # the sheriffing tier for this task (default: set based on test platform)
+ Optional('tier'): int,
+
+ # number of chunks to create for this task. This can be keyed by test
+ # platform by passing a dictionary in the `by-test-platform` key. If the
+ # test platform is not found, the key 'default' will be tried.
+ Required('chunks', default=1): Any(
+ int,
+ {'by-test-platform': {basestring: int}},
+ ),
+
+ # the time (with unit) after which this task is deleted; default depends on
+ # the branch (see below)
+ Optional('expires-after'): basestring,
+
+ # Whether to run this task with e10s (desktop-test only). If false, run
+ # without e10s; if true, run with e10s; if 'both', run one task with and
+ # one task without e10s. E10s tasks have "-e10s" appended to the test name
+ # and treeherder group.
+ Required('e10s', default='both'): Any(
+ bool, 'both',
+ {'by-test-platform': {basestring: Any(bool, 'both')}},
+ ),
+
+ # The EC2 instance size to run these tests on.
+ Required('instance-size', default='default'): Any(
+ Any('default', 'large', 'xlarge', 'legacy'),
+ {'by-test-platform': {basestring: Any('default', 'large', 'xlarge', 'legacy')}},
+ ),
+
+ # Whether the task requires loopback audio or video (whatever that may mean
+ # on the platform)
+ Required('loopback-audio', default=False): bool,
+ Required('loopback-video', default=False): bool,
+
+ # Whether the test can run using a software GL implementation on Linux
+ # using the GL compositor. May not be used with "legacy" sized instances
+ # due to poor LLVMPipe performance (bug 1296086).
+ Optional('allow-software-gl-layers', default=True): bool,
+
+ # The worker implementation for this test, as dictated by policy and by the
+ # test platform.
+ Optional('worker-implementation'): Any(
+ 'docker-worker',
+ 'macosx-engine',
+ 'generic-worker',
+ # coming soon:
+ 'docker-engine',
+ 'buildbot-bridge',
+ ),
+
+ # For tasks that will run in docker-worker or docker-engine, this is the
+ # name of the docker image or in-tree docker image to run the task in. If
+ # in-tree, then a dependency will be created automatically. This is
+ # generally `desktop-test`, or an image that acts an awful lot like it.
+ Required('docker-image', default={'in-tree': 'desktop-test'}): Any(
+ # a raw Docker image path (repo/image:tag)
+ basestring,
+ # an in-tree generated docker image (from `testing/docker/<name>`)
+ {'in-tree': basestring}
+ ),
+
+ # seconds of runtime after which the task will be killed. Like 'chunks',
+    # this can be keyed by test platform.
+ Required('max-run-time', default=3600): Any(
+ int,
+ {'by-test-platform': {basestring: int}},
+ ),
+
+ # the exit status code that indicates the task should be retried
+ Optional('retry-exit-status'): int,
+
+ # Whether to perform a gecko checkout.
+ Required('checkout', default=False): bool,
+
+ # What to run
+ Required('mozharness'): Any({
+ # the mozharness script used to run this task
+ Required('script'): basestring,
+
+ # the config files required for the task
+ Required('config'): Any(
+ [basestring],
+ {'by-test-platform': {basestring: [basestring]}},
+ ),
+
+ # any additional actions to pass to the mozharness command
+ Optional('actions'): [basestring],
+
+ # additional command-line options for mozharness, beyond those
+ # automatically added
+ Required('extra-options', default=[]): Any(
+ [basestring],
+ {'by-test-platform': {basestring: [basestring]}},
+ ),
+
+ # the artifact name (including path) to test on the build task; this is
+ # generally set in a per-kind transformation
+ Optional('build-artifact-name'): basestring,
+
+ # If true, tooltool downloads will be enabled via relengAPIProxy.
+ Required('tooltool-downloads', default=False): bool,
+
+ # This mozharness script also runs in Buildbot and tries to read a
+ # buildbot config file, so tell it not to do so in TaskCluster
+ Required('no-read-buildbot-config', default=False): bool,
+
+ # Add --blob-upload-branch=<project> mozharness parameter
+ Optional('include-blob-upload-branch'): bool,
+
+ # The setting for --download-symbols (if omitted, the option will not
+ # be passed to mozharness)
+ Optional('download-symbols'): Any(True, 'ondemand'),
+
+ # If set, then MOZ_NODE_PATH=/usr/local/bin/node is included in the
+ # environment. This is more than just a helpful path setting -- it
+ # causes xpcshell tests to start additional servers, and runs
+ # additional tests.
+ Required('set-moz-node-path', default=False): bool,
+
+ # If true, include chunking information in the command even if the number
+ # of chunks is 1
+ Required('chunked', default=False): bool,
+
+ # The chunking argument format to use
+ Required('chunking-args', default='this-chunk'): Any(
+ # Use the usual --this-chunk/--total-chunk arguments
+ 'this-chunk',
+ # Use --test-suite=<suite>-<chunk-suffix>; see chunk-suffix, below
+ 'test-suite-suffix',
+ ),
+
+        # the string to append to the `--test-suite` argument when
+ # chunking-args = test-suite-suffix; "<CHUNK>" in this string will
+ # be replaced with the chunk number.
+ Optional('chunk-suffix'): basestring,
+ }),
+
+ # The current chunk; this is filled in by `all_kinds.py`
+ Optional('this-chunk'): int,
+
+    # os user groups for test task workers; the required scopes will be
+    # added automatically
+ Optional('os-groups', default=[]): Any(
+ [basestring],
+ # todo: create a dedicated elevated worker group and name here
+ {'by-test-platform': {basestring: [basestring]}},
+ ),
+
+ # -- values supplied by the task-generation infrastructure
+
+ # the platform of the build this task is testing
+ 'build-platform': basestring,
+
+ # the label of the build task generating the materials to test
+ 'build-label': basestring,
+
+ # the platform on which the tests will run
+ 'test-platform': basestring,
+
+ # the name of the test (the key in tests.yml)
+ 'test-name': basestring,
+
+}, required=True)
+
+
+# TODO: can we have validate and validate_full for before and after?
+def validate(config, tests):
+ for test in tests:
+ yield validate_schema(test_description_schema, test,
+ "In test {!r}:".format(test['test-name']))
diff --git a/taskcluster/taskgraph/transforms/upload_symbols.py b/taskcluster/taskgraph/transforms/upload_symbols.py
new file mode 100644
index 000000000..9b4884a97
--- /dev/null
+++ b/taskcluster/taskgraph/transforms/upload_symbols.py
@@ -0,0 +1,36 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Transform the upload-symbols task description template,
+ taskcluster/ci/upload-symbols/job-template.yml
+into an actual task description.
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+from taskgraph.transforms.base import TransformSequence
+
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def fill_template(config, tasks):
+ for task in tasks:
+ # Fill out the dynamic fields in the task description
+ task['label'] = task['build-label'] + '-upload-symbols'
+ task['dependencies'] = {'build': task['build-label']}
+ task['worker']['env']['GECKO_HEAD_REPOSITORY'] = config.params['head_repository']
+ task['worker']['env']['GECKO_HEAD_REV'] = config.params['head_rev']
+
+ build_platform, build_type = task['build-platform'].split('/')
+ attributes = task.setdefault('attributes', {})
+ attributes['build_platform'] = build_platform
+ attributes['build_type'] = build_type
+
+ # clear out the stuff that's not part of a task description
+ del task['build-label']
+ del task['build-platform']
+
+ yield task
diff --git a/taskcluster/taskgraph/try_option_syntax.py b/taskcluster/taskgraph/try_option_syntax.py
new file mode 100644
index 000000000..b5988db98
--- /dev/null
+++ b/taskcluster/taskgraph/try_option_syntax.py
@@ -0,0 +1,559 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import copy
+import logging
+import re
+import shlex
+
+logger = logging.getLogger(__name__)
+
+TRY_DELIMITER = 'try:'
+
+# The build type aliases are very cryptic and only used in try flags; these
+# are mappings from the single-char alias to a longer, more recognizable form.
+BUILD_TYPE_ALIASES = {
+ 'o': 'opt',
+ 'd': 'debug'
+}
+
+# consider anything in this whitelist of kinds to be governed by -b/-p
+BUILD_KINDS = set([
+ 'build',
+ 'artifact-build',
+ 'hazard',
+ 'l10n',
+ 'upload-symbols',
+ 'valgrind',
+ 'static-analysis',
+ 'spidermonkey',
+])
+
+# anything in this list is governed by -j
+JOB_KINDS = set([
+ 'source-check',
+ 'toolchain',
+ 'marionette-harness',
+ 'android-stuff',
+])
+
+
+# mapping from shortcut name (usable with -u) to a boolean function identifying
+# matching test names
+def alias_prefix(prefix):
+ return lambda name: name.startswith(prefix)
+
+
+def alias_contains(infix):
+ return lambda name: infix in name
+
+
+def alias_matches(pattern):
+ pattern = re.compile(pattern)
+ return lambda name: pattern.match(name)
+
+UNITTEST_ALIASES = {
+ # Aliases specify shorthands that can be used in try syntax. The shorthand
+ # is the dictionary key, with the value representing a pattern for matching
+ # unittest_try_names.
+ #
+    # Note that alias expansion is performed in the absence of any chunk
+    # suffixes: for example, an alias mapping 'foo' to 'foobar' would replace
+    # "foo-7" with "foobar-7". Note that a few aliases allowed chunks to be
+    # specified without a leading `-`, for example 'mochitest-dt1'. That's no
+    # longer supported.
+ 'cppunit': alias_prefix('cppunit'),
+ 'crashtest': alias_prefix('crashtest'),
+ 'crashtest-e10s': alias_prefix('crashtest-e10s'),
+ 'e10s': alias_contains('e10s'),
+ 'external-media-tests': alias_prefix('external-media-tests'),
+ 'firefox-ui-functional': alias_prefix('firefox-ui-functional'),
+ 'firefox-ui-functional-e10s': alias_prefix('firefox-ui-functional-e10s'),
+ 'gaia-js-integration': alias_contains('gaia-js-integration'),
+ 'gtest': alias_prefix('gtest'),
+ 'jittest': alias_prefix('jittest'),
+ 'jittests': alias_prefix('jittest'),
+ 'jsreftest': alias_prefix('jsreftest'),
+ 'jsreftest-e10s': alias_prefix('jsreftest-e10s'),
+ 'marionette': alias_prefix('marionette'),
+ 'marionette-e10s': alias_prefix('marionette-e10s'),
+ 'mochitest': alias_prefix('mochitest'),
+ 'mochitests': alias_prefix('mochitest'),
+ 'mochitest-e10s': alias_prefix('mochitest-e10s'),
+ 'mochitests-e10s': alias_prefix('mochitest-e10s'),
+ 'mochitest-debug': alias_prefix('mochitest-debug-'),
+ 'mochitest-a11y': alias_contains('mochitest-a11y'),
+ 'mochitest-bc': alias_prefix('mochitest-browser-chrome'),
+ 'mochitest-e10s-bc': alias_prefix('mochitest-e10s-browser-chrome'),
+ 'mochitest-browser-chrome': alias_prefix('mochitest-browser-chrome'),
+ 'mochitest-e10s-browser-chrome': alias_prefix('mochitest-e10s-browser-chrome'),
+ 'mochitest-chrome': alias_contains('mochitest-chrome'),
+ 'mochitest-dt': alias_prefix('mochitest-devtools-chrome'),
+ 'mochitest-e10s-dt': alias_prefix('mochitest-e10s-devtools-chrome'),
+ 'mochitest-gl': alias_prefix('mochitest-webgl'),
+ 'mochitest-gl-e10s': alias_prefix('mochitest-webgl-e10s'),
+ 'mochitest-gpu': alias_prefix('mochitest-gpu'),
+ 'mochitest-gpu-e10s': alias_prefix('mochitest-gpu-e10s'),
+ 'mochitest-clipboard': alias_prefix('mochitest-clipboard'),
+ 'mochitest-clipboard-e10s': alias_prefix('mochitest-clipboard-e10s'),
+ 'mochitest-jetpack': alias_prefix('mochitest-jetpack'),
+ 'mochitest-media': alias_prefix('mochitest-media'),
+ 'mochitest-media-e10s': alias_prefix('mochitest-media-e10s'),
+ 'mochitest-vg': alias_prefix('mochitest-valgrind'),
+ 'reftest': alias_matches(r'^(plain-)?reftest.*$'),
+ 'reftest-no-accel': alias_matches(r'^(plain-)?reftest-no-accel.*$'),
+ 'reftests': alias_matches(r'^(plain-)?reftest.*$'),
+ 'reftests-e10s': alias_matches(r'^(plain-)?reftest-e10s.*$'),
+ 'robocop': alias_prefix('robocop'),
+ 'web-platform-test': alias_prefix('web-platform-tests'),
+ 'web-platform-tests': alias_prefix('web-platform-tests'),
+ 'web-platform-tests-e10s': alias_prefix('web-platform-tests-e10s'),
+ 'web-platform-tests-reftests': alias_prefix('web-platform-tests-reftests'),
+ 'web-platform-tests-reftests-e10s': alias_prefix('web-platform-tests-reftests-e10s'),
+ 'xpcshell': alias_prefix('xpcshell'),
+}
+
+# unittest platforms can be specified by substring of the "pretty name", which
+# is basically the old Buildbot builder name. This dict has {pretty name,
+# [test_platforms]} translations. It includes only the most commonly-used
+# substrings and is intended only for backward compatibility. New test
+# platforms should have their `test_platform` spelled out fully in try syntax.
+UNITTEST_PLATFORM_PRETTY_NAMES = {
+ 'Ubuntu': ['linux', 'linux64', 'linux64-asan'],
+ 'x64': ['linux64', 'linux64-asan'],
+ 'Android 4.3': ['android-4.3-arm7-api-15'],
+ # other commonly-used substrings for platforms not yet supported with
+ # in-tree taskgraphs:
+ # '10.10': [..TODO..],
+ # '10.10.5': [..TODO..],
+ # '10.6': [..TODO..],
+ # '10.8': [..TODO..],
+ # 'Android 2.3 API9': [..TODO..],
+ # 'Windows 7': [..TODO..],
+ # 'Windows 7 VM': [..TODO..],
+ # 'Windows 8': [..TODO..],
+ # 'Windows XP': [..TODO..],
+ # 'win32': [..TODO..],
+ # 'win64': [..TODO..],
+}
+
+# We have a few platforms for which we want to do some "extra" builds, or at
+# least build-ish things. Sort of. Anyway, these other things are implemented
+# as different "platforms". These do *not* automatically ride along with "-p
+# all"
+RIDEALONG_BUILDS = {
+ 'android-api-15': [
+ 'android-api-15-l10n',
+ ],
+ 'linux': [
+ 'linux-l10n',
+ ],
+ 'linux64': [
+ 'linux64-l10n',
+ 'sm-plain',
+ 'sm-nonunified',
+ 'sm-arm-sim',
+ 'sm-arm64-sim',
+ 'sm-compacting',
+ 'sm-rootanalysis',
+ 'sm-package',
+ 'sm-tsan',
+ 'sm-asan',
+ 'sm-mozjs-sys',
+ 'sm-msan',
+ ],
+}
+
+TEST_CHUNK_SUFFIX = re.compile('(.*)-([0-9]+)$')
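+# e.g. this matches 'mochitest-browser-chrome-3', capturing
+# ('mochitest-browser-chrome', '3')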
+
+
+class TryOptionSyntax(object):
+
+ def __init__(self, message, full_task_graph):
+ """
+ Parse a "try syntax" formatted commit message. This is the old "-b do -p
+ win32 -u all" format. Aliases are applied to map short names to full
+ names.
+
+ The resulting object has attributes:
+
+ - build_types: a list containing zero or more of 'opt' and 'debug'
+ - platforms: a list of selected platform names, or None for all
+ - unittests: a list of tests, of the form given below, or None for all
+        - talos: a list of talos tests, in the same form as unittests
+        - jobs: a list of requested job names, or None for all
+ - trigger_tests: the number of times tests should be triggered (--rebuild)
+ - interactive: true if --interactive
+ - notifications: either None if no notifications or one of 'all' or 'failure'
+
+ Note that -t is currently completely ignored.
+
+ The unittests and talos lists contain dictionaries of the form:
+
+ {
+ 'test': '<suite name>',
+ 'platforms': [..platform names..], # to limit to only certain platforms
+ 'only_chunks': set([..chunk numbers..]), # to limit only to certain chunks
+ }
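+
+        As a rough example, "try: -b d -p linux64 -u mochitest-bc" yields
+        build_types ['debug'], platforms ['linux64'] plus its ridealong
+        builds, and unittests matching the 'mochitest-bc' alias (tests whose
+        unittest_try_name starts with 'mochitest-browser-chrome') on all
+        platforms.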
+ """
+ self.jobs = []
+ self.build_types = []
+ self.platforms = []
+ self.unittests = []
+ self.talos = []
+ self.trigger_tests = 0
+ self.interactive = False
+ self.notifications = None
+
+ # shlex used to ensure we split correctly when giving values to argparse.
+ parts = shlex.split(self.escape_whitespace_in_brackets(message))
+ try_idx = None
+ for idx, part in enumerate(parts):
+ if part == TRY_DELIMITER:
+ try_idx = idx
+ break
+
+ if try_idx is None:
+ return
+
+        # Argument parser based on try flags
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-b', '--build', dest='build_types')
+ parser.add_argument('-p', '--platform', nargs='?',
+ dest='platforms', const='all', default='all')
+ parser.add_argument('-u', '--unittests', nargs='?',
+ dest='unittests', const='all', default='all')
+ parser.add_argument('-t', '--talos', nargs='?', dest='talos', const='all', default='all')
+ parser.add_argument('-i', '--interactive',
+ dest='interactive', action='store_true', default=False)
+ parser.add_argument('-e', '--all-emails',
+ dest='notifications', action='store_const', const='all')
+ parser.add_argument('-f', '--failure-emails',
+ dest='notifications', action='store_const', const='failure')
+ parser.add_argument('-j', '--job', dest='jobs', action='append')
+ # In order to run test jobs multiple times
+ parser.add_argument('--rebuild', dest='trigger_tests', type=int, default=1)
+ args, _ = parser.parse_known_args(parts[try_idx:])
+
+ self.jobs = self.parse_jobs(args.jobs)
+ self.build_types = self.parse_build_types(args.build_types)
+ self.platforms = self.parse_platforms(args.platforms)
+ self.unittests = self.parse_test_option(
+ "unittest_try_name", args.unittests, full_task_graph)
+ self.talos = self.parse_test_option("talos_try_name", args.talos, full_task_graph)
+ self.trigger_tests = args.trigger_tests
+ self.interactive = args.interactive
+ self.notifications = args.notifications
+
+ def parse_jobs(self, jobs_arg):
+ if not jobs_arg or jobs_arg == ['all']:
+ return None
+ expanded = []
+ for job in jobs_arg:
+ expanded.extend(j.strip() for j in job.split(','))
+ return expanded
+
+ def parse_build_types(self, build_types_arg):
+ if build_types_arg is None:
+ build_types_arg = []
+ build_types = filter(None, [BUILD_TYPE_ALIASES.get(build_type) for
+ build_type in build_types_arg])
+ return build_types
+
+ def parse_platforms(self, platform_arg):
+ if platform_arg == 'all':
+ return None
+
+ results = []
+ for build in platform_arg.split(','):
+ results.append(build)
+ if build in RIDEALONG_BUILDS:
+ results.extend(RIDEALONG_BUILDS[build])
+ logger.info("platform %s triggers ridealong builds %s" %
+ (build, ', '.join(RIDEALONG_BUILDS[build])))
+
+ return results
+
+ def parse_test_option(self, attr_name, test_arg, full_task_graph):
+        '''
+        Parse a unittest (-u) or talos (-t) option, in the context of a full
+        task graph containing available `unittest_try_name` or `talos_try_name`
+        attributes. There are three cases:
+
+        - test_arg == 'none' (meaning an empty list)
+        - test_arg == 'all' (meaning use the list of jobs for that job type)
+        - test_arg is a comma-separated string which needs to be parsed
+        '''
+
+ # Empty job list case...
+ if test_arg is None or test_arg == 'none':
+ return []
+
+ all_platforms = set(t.attributes['test_platform']
+ for t in full_task_graph.tasks.itervalues()
+ if 'test_platform' in t.attributes)
+
+ tests = self.parse_test_opts(test_arg, all_platforms)
+
+ if not tests:
+ return []
+
+ all_tests = set(t.attributes[attr_name]
+ for t in full_task_graph.tasks.itervalues()
+ if attr_name in t.attributes)
+
+ # Special case where tests is 'all' and must be expanded
+ if tests[0]['test'] == 'all':
+ results = []
+ all_entry = tests[0]
+ for test in all_tests:
+ entry = {'test': test}
+ # If there are platform restrictions, copy them across the list.
+ if 'platforms' in all_entry:
+ entry['platforms'] = list(all_entry['platforms'])
+ results.append(entry)
+ return self.parse_test_chunks(all_tests, results)
+ else:
+ return self.parse_test_chunks(all_tests, tests)
+
+ def parse_test_opts(self, input_str, all_platforms):
+ '''
+ Parse `testspec,testspec,..`, where each testspec is a test name
+ optionally followed by a list of test platforms or negated platforms in
+ `[]`.
+
+ No brackets indicates that tests should run on all platforms for which
+ builds are available. If platform specs are provided, each is treated,
+ from left to right, as an instruction to include or (if negated)
+ exclude a set of test platforms. A single spec may expand to multiple
+ test platforms via UNITTEST_PLATFORM_PRETTY_NAMES. If the first
+ platform spec is negated, processing begins with the full set of
+ available test platforms; otherwise, processing begins with an empty
+ set of test platforms.
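+
+ For example (an illustrative sketch; the exact expansion depends on
+ UNITTEST_PLATFORM_PRETTY_NAMES):
+
+ 'mochitest[linux64]' -> [{'test': 'mochitest',
+ 'platforms': ['linux64']}]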
+ '''
+
+ # Final results which we will return.
+ tests = []
+
+ cur_test = {}
+ token = ''
+ in_platforms = False
+
+ def normalize_platforms():
+ if 'platforms' not in cur_test:
+ return
+ # if the first spec is a negation, start with all platforms
+ if cur_test['platforms'][0][0] == '-':
+ platforms = all_platforms.copy()
+ else:
+ platforms = []
+ for platform in cur_test['platforms']:
+ if platform[0] == '-':
+ platforms = [p for p in platforms if p != platform[1:]]
+ else:
+ platforms.append(platform)
+ cur_test['platforms'] = platforms
+
+ def add_test(value):
+ normalize_platforms()
+ cur_test['test'] = value.strip()
+ tests.insert(0, cur_test)
+
+ def add_platform(value):
+ platform = value.strip()
+ if platform[0] == '-':
+ negated = True
+ platform = platform[1:]
+ else:
+ negated = False
+ platforms = UNITTEST_PLATFORM_PRETTY_NAMES.get(platform, [platform])
+ if negated:
+ platforms = ["-" + p for p in platforms]
+ cur_test['platforms'] = platforms + cur_test.get('platforms', [])
+
+ # This might be somewhat confusing, but we parse the string _backwards_ so
+ # there is no ambiguity over what state we are in.
+ for char in reversed(input_str):
+
+ # , indicates exiting a state
+ if char == ',':
+
+ # Exit a particular platform.
+ if in_platforms:
+ add_platform(token)
+
+ # Exit a particular test.
+ else:
+ add_test(token)
+ cur_test = {}
+
+ # Token must always be reset after we exit a state
+ token = ''
+ elif char == '[':
+ # Exiting platform state entering test state.
+ add_platform(token)
+ token = ''
+ in_platforms = False
+ elif char == ']':
+ # Entering platform state.
+ in_platforms = True
+ else:
+ # Accumulator.
+ token = char + token
+
+ # Handle any leftover token.
+ if token:
+ add_test(token)
+
+ return tests
+
+ def handle_alias(self, test, all_tests):
+ '''
+ Expand a test if its name refers to an alias, returning a list of test
+ dictionaries cloned from the first (to maintain any metadata).
+ '''
+ if test['test'] not in UNITTEST_ALIASES:
+ return [test]
+
+ alias = UNITTEST_ALIASES[test['test']]
+
+ def mktest(name):
+ newtest = copy.deepcopy(test)
+ newtest['test'] = name
+ return newtest
+
+ def exprmatch(alias):
+ return [t for t in all_tests if alias(t)]
+
+ return [mktest(t) for t in exprmatch(alias)]
+
+ def parse_test_chunks(self, all_tests, tests):
+ '''
+ Test flags may include parameters to narrow down the number of chunks in a
+ given push. We don't model 1 chunk = 1 job in taskcluster, so we must check
+ each test flag to see if it is actually specifying a chunk.
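+
+ For example (illustrative, assuming TEST_CHUNK_SUFFIX matches a trailing
+ '-<number>'), the specs 'mochitest-1' and 'mochitest-2' collapse into a
+ single entry {'test': 'mochitest', 'only_chunks': {'1', '2'}}.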
+ '''
+ results = []
+ seen_chunks = {}
+ for test in tests:
+ matches = TEST_CHUNK_SUFFIX.match(test['test'])
+ if matches:
+ name = matches.group(1)
+ chunk = matches.group(2)
+ if name in seen_chunks:
+ seen_chunks[name].add(chunk)
+ else:
+ seen_chunks[name] = {chunk}
+ test['test'] = name
+ test['only_chunks'] = seen_chunks[name]
+ results.append(test)
+ else:
+ results.extend(self.handle_alias(test, all_tests))
+
+ # uniquify the results over the test names
+ results = {test['test']: test for test in results}.values()
+ return results
+
+ def find_all_attribute_suffixes(self, graph, prefix):
+ rv = set()
+ for t in graph.tasks.itervalues():
+ for a in t.attributes:
+ if a.startswith(prefix):
+ rv.add(a[len(prefix):])
+ return sorted(rv)
+
+ def escape_whitespace_in_brackets(self, input_str):
+ '''
+ Tests may be restricted to certain platforms with a bracketed list.
+ Whitespace may occur inside the brackets; since that is typically invalid
+ shell syntax, we escape it with backslash sequences.
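+
+ For example (illustrative):
+ 'mochitest[Windows XP]' -> 'mochitest[Windows\ XP]'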
+ '''
+ result = ""
+ in_brackets = False
+ for char in input_str:
+ if char == '[':
+ in_brackets = True
+ result += char
+ continue
+
+ if char == ']':
+ in_brackets = False
+ result += char
+ continue
+
+ if char == ' ' and in_brackets:
+ result += '\ '
+ continue
+
+ result += char
+
+ return result
+
+ def task_matches(self, attributes):
+ attr = attributes.get
+
+ def check_run_on_projects():
+ return set(['try', 'all']) & set(attr('run_on_projects', []))
+
+ def match_test(try_spec, attr_name):
+ if attr('build_type') not in self.build_types:
+ return False
+ if self.platforms is not None:
+ if attr('build_platform') not in self.platforms:
+ return False
+ else:
+ if not check_run_on_projects():
+ return False
+ if try_spec is None:
+ return True
+ # TODO: optimize this search a bit
+ for test in try_spec:
+ if attr(attr_name) == test['test']:
+ break
+ else:
+ return False
+ if 'platforms' in test and attr('test_platform') not in test['platforms']:
+ return False
+ if 'only_chunks' in test and attr('test_chunk') not in test['only_chunks']:
+ return False
+ return True
+
+ if attr('kind') in ('desktop-test', 'android-test'):
+ return match_test(self.unittests, 'unittest_try_name')
+ elif attr('kind') in JOB_KINDS:
+ if self.jobs is None:
+ return True
+ if attr('build_platform') in self.jobs:
+ return True
+ elif attr('kind') in BUILD_KINDS:
+ if attr('build_type') not in self.build_types:
+ return False
+ elif self.platforms is None:
+ # for "-p all", look for try in the 'run_on_projects' attribute
+ return check_run_on_projects()
+ else:
+ if attr('build_platform') not in self.platforms:
+ return False
+ return True
+ else:
+ return False
+
+ def __str__(self):
+ def none_for_all(lst):
+ if lst is None:
+ return '<all>'
+ return ', '.join(str(e) for e in lst)
+
+ return "\n".join([
+ "build_types: " + ", ".join(self.build_types),
+ "platforms: " + none_for_all(self.platforms),
+ "unittests: " + none_for_all(self.unittests),
+ "jobs: " + none_for_all(self.jobs),
+ "trigger_tests: " + str(self.trigger_tests),
+ "interactive: " + str(self.interactive),
+ "notifications: " + self.notifications,
+ ])
diff --git a/taskcluster/taskgraph/util/__init__.py b/taskcluster/taskgraph/util/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/util/__init__.py
diff --git a/taskcluster/taskgraph/util/attributes.py b/taskcluster/taskgraph/util/attributes.py
new file mode 100644
index 000000000..b44a3364f
--- /dev/null
+++ b/taskcluster/taskgraph/util/attributes.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+def attrmatch(attributes, **kwargs):
+ """Determine whether the given set of task attributes matches. The
+ conditions are given as keyword arguments, where each keyword names an
+ attribute. The keyword value can be a literal, a set, or a callable. A
+ literal must match the attribute exactly. Given a set, the attribute value
+ must be in the set. A callable is called with the attribute value. If an
+ attribute is specified as a keyword argument but not present in the
+ attributes, the result is False."""
+ for kwkey, kwval in kwargs.iteritems():
+ if kwkey not in attributes:
+ return False
+ attval = attributes[kwkey]
+ if isinstance(kwval, set):
+ if attval not in kwval:
+ return False
+ elif callable(kwval):
+ if not kwval(attval):
+ return False
+ elif kwval != attval:
+ return False
+ return True
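+
+# Examples (illustrative):
+# attrmatch({'kind': 'build'}, kind='build') -> True
+# attrmatch({'kind': 'build'}, kind={'build', 'test'}) -> True
+# attrmatch({'kind': 'build'}, kind=lambda v: v.startswith('b')) -> True
+# attrmatch({}, kind='build') -> False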
diff --git a/taskcluster/taskgraph/util/docker.py b/taskcluster/taskgraph/util/docker.py
new file mode 100644
index 000000000..df97e57bc
--- /dev/null
+++ b/taskcluster/taskgraph/util/docker.py
@@ -0,0 +1,160 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import hashlib
+import os
+import shutil
+import subprocess
+import tarfile
+import tempfile
+
+from mozpack.archive import (
+ create_tar_gz_from_files,
+)
+
+
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
+DOCKER_ROOT = os.path.join(GECKO, 'testing', 'docker')
+INDEX_PREFIX = 'docker.images.v2'
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+
+
+def docker_image(name, default_version=None):
+ '''Determine the docker image name, including repository and tag, from an
+ in-tree docker file.'''
+ try:
+ with open(os.path.join(DOCKER_ROOT, name, 'REGISTRY')) as f:
+ registry = f.read().strip()
+ except IOError:
+ with open(os.path.join(DOCKER_ROOT, 'REGISTRY')) as f:
+ registry = f.read().strip()
+
+ try:
+ with open(os.path.join(DOCKER_ROOT, name, 'VERSION')) as f:
+ version = f.read().strip()
+ except IOError:
+ if not default_version:
+ raise
+
+ version = default_version
+
+ return '{}/{}:{}'.format(registry, name, version)
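+
+# Example (illustrative; the actual registry and version are read from the
+# in-tree REGISTRY and VERSION files):
+# docker_image('desktop-build') -> 'taskcluster/desktop-build:0.1.10'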
+
+
+def generate_context_hash(topsrcdir, image_path, image_name):
+ """Generates a sha256 hash for context directory used to build an image."""
+
+ # It is a bit unfortunate we have to create a temp file here - it would
+ # be nicer to use an in-memory buffer.
+ fd, p = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ return create_context_tar(topsrcdir, image_path, p, image_name)
+ finally:
+ os.unlink(p)
+
+
+def create_context_tar(topsrcdir, context_dir, out_path, prefix):
+ """Create a context tarball.
+
+ A directory ``context_dir`` containing a Dockerfile will be assembled into
+ a gzipped tar file at ``out_path``. Files inside the archive will be
+ prefixed by directory ``prefix``.
+
+ We also scan the source Dockerfile for special syntax that influences
+ context generation.
+
+ If a line in the Dockerfile has the form ``# %include <path>``,
+ the relative path specified on that line will be matched against
+ files in the source repository and added to the context under the
+ path ``topsrcdir/``. If an entry is a directory, we add all files
+ under that directory.
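+
+ For example (illustrative), a Dockerfile line of the form
+ ``# %include python/mozbuild`` adds files from python/mozbuild to the
+ context beneath ``topsrcdir/python/mozbuild``.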
+
+ Returns the SHA-256 hex digest of the created archive.
+ """
+ archive_files = {}
+
+ for root, dirs, files in os.walk(context_dir):
+ for f in files:
+ source_path = os.path.join(root, f)
+ rel = source_path[len(context_dir) + 1:]
+ archive_path = os.path.join(prefix, rel)
+ archive_files[archive_path] = source_path
+
+ # Parse Dockerfile for special syntax of extra files to include.
+ with open(os.path.join(context_dir, 'Dockerfile'), 'rb') as fh:
+ for line in fh:
+ line = line.rstrip()
+ if not line.startswith('# %include'):
+ continue
+
+ p = line[len('# %include '):].strip()
+ if os.path.isabs(p):
+ raise Exception('extra include path cannot be absolute: %s' % p)
+
+ fs_path = os.path.normpath(os.path.join(topsrcdir, p))
+ # Check for filesystem traversal exploits.
+ if not fs_path.startswith(topsrcdir):
+ raise Exception('extra include path outside topsrcdir: %s' % p)
+
+ if not os.path.exists(fs_path):
+ raise Exception('extra include path does not exist: %s' % p)
+
+ if os.path.isdir(fs_path):
+ for root, dirs, files in os.walk(fs_path):
+ for f in files:
+ source_path = os.path.join(root, f)
+ archive_path = os.path.join(prefix, 'topsrcdir', p, f)
+ archive_files[archive_path] = source_path
+ else:
+ archive_path = os.path.join(prefix, 'topsrcdir', p)
+ archive_files[archive_path] = fs_path
+
+ with open(out_path, 'wb') as fh:
+ create_tar_gz_from_files(fh, archive_files, '%s.tar.gz' % prefix)
+
+ h = hashlib.sha256()
+ with open(out_path, 'rb') as fh:
+ while True:
+ data = fh.read(32768)
+ if not data:
+ break
+ h.update(data)
+ return h.hexdigest()
+
+
+def build_from_context(docker_bin, context_path, prefix, tag=None):
+ """Build a Docker image from a context archive.
+
+ Given the path to a `docker` binary, an image build tar.gz (produced with
+ ``create_context_tar()``), a prefix in that context containing files, and
+ an optional ``tag`` for the produced image, build that Docker image.
+ """
+ d = tempfile.mkdtemp()
+ try:
+ with tarfile.open(context_path, 'r:gz') as tf:
+ tf.extractall(d)
+
+ # If we wanted to do post-processing of the Dockerfile, this is
+ # where we'd do it.
+
+ args = [
+ docker_bin,
+ 'build',
+ # Use --no-cache so we always get the latest package updates.
+ '--no-cache',
+ ]
+
+ if tag:
+ args.extend(['-t', tag])
+
+ args.append('.')
+
+ res = subprocess.call(args, cwd=os.path.join(d, prefix))
+ if res:
+ raise Exception('error building image')
+ finally:
+ shutil.rmtree(d)
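+
+# A minimal usage sketch (illustrative; the image name, paths and version
+# are assumptions):
+# context = os.path.join(DOCKER_ROOT, 'desktop-build')
+# create_context_tar(GECKO, context, '/tmp/ctx.tar.gz', 'desktop-build')
+# build_from_context('docker', '/tmp/ctx.tar.gz', 'desktop-build',
+#                    tag=docker_image('desktop-build', default_version='0.1.0'))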
diff --git a/taskcluster/taskgraph/util/python_path.py b/taskcluster/taskgraph/util/python_path.py
new file mode 100644
index 000000000..b14223ca6
--- /dev/null
+++ b/taskcluster/taskgraph/util/python_path.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+
+def find_object(path):
+ """
+ Find a Python object given a path of the form <modulepath>:<objectpath>.
+ Conceptually equivalent to
+
+ def find_object(modulepath, objectpath):
+ import <modulepath> as mod
+ return mod.<objectpath>
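+
+ For example (illustrative), find_object('os.path:join') returns the
+ os.path.join function.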
+ """
+ if path.count(':') != 1:
+ raise ValueError(
+ 'python path {!r} does not have the form "module:object"'.format(path))
+
+ modulepath, objectpath = path.split(':')
+ obj = __import__(modulepath)
+ for a in modulepath.split('.')[1:]:
+ obj = getattr(obj, a)
+ for a in objectpath.split('.'):
+ obj = getattr(obj, a)
+ return obj
diff --git a/taskcluster/taskgraph/util/seta.py b/taskcluster/taskgraph/util/seta.py
new file mode 100644
index 000000000..a0cd30675
--- /dev/null
+++ b/taskcluster/taskgraph/util/seta.py
@@ -0,0 +1,85 @@
+import json
+import logging
+import requests
+from redo import retry
+from requests import exceptions
+
+logger = logging.getLogger(__name__)
+headers = {
+ 'User-Agent': 'TaskCluster'
+}
+
+# The list of projects for which SETA data is useful.
+SETA_PROJECTS = ['mozilla-inbound', 'autoland']
+SETA_ENDPOINT = "https://seta.herokuapp.com/data/setadetails/?branch=%s"
+
+
+class SETA(object):
+ """
+ Interface to the SETA service, which defines low-value tasks that can be optimized out
+ of the taskgraph.
+ """
+ def __init__(self):
+ # cached low value tasks, by project
+ self.low_value_tasks = {}
+
+ def query_low_value_tasks(self, project):
+ # Request the set of low value tasks from the SETA service. Low value tasks will be
+ # optimized out of the task graph.
+ if project not in SETA_PROJECTS:
+ logger.debug("SETA is not enabled for project `{}`".format(project))
+ return []
+
+ logger.debug("Querying SETA service for low-value tasks on {}".format(project))
+ low_value_tasks = []
+
+ url = SETA_ENDPOINT % project
+ # Try to fetch the SETA data twice, waiting 10 seconds between attempts,
+ # and fall back to an empty list of low value tasks on failure.
+ try:
+ logger.debug("Retrieving low-value jobs list from SETA")
+ response = retry(requests.get, attempts=2, sleeptime=10,
+ args=(url, ),
+ kwargs={'timeout': 5, 'headers': headers})
+ task_list = json.loads(response.content).get('jobtypes', '')
+ if len(task_list) > 0:
+ low_value_tasks = task_list.values()[0]
+
+ # Bug 1315145, disable SETA for tier-1 platforms until backfill is implemented.
+ low_value_tasks = [x for x in low_value_tasks if x.find('debug') == -1]
+ low_value_tasks = [x for x in low_value_tasks if x.find('asan') == -1]
+
+ # If the request times out, requests raises a Timeout exception.
+ except exceptions.Timeout:
+ logger.warning("SETA server request timed out; treating all test tasks as high value.")
+
+ # In the event of a network problem (e.g. DNS failure, refused connection),
+ # requests will raise a ConnectionError.
+ except exceptions.ConnectionError:
+ logger.warning("Failed to connect to the SETA server; treating all test tasks as high value.")
+
+ # In the event of a rare invalid HTTP response (e.g. 404, 401),
+ # requests will raise an HTTPError exception.
+ except exceptions.HTTPError:
+ logger.warning("Received a bad HTTP response from ouija;"
+ " treating all test tasks as high value.")
+
+ # Log any other request error as a warning if none of the handlers above caught it.
+ except exceptions.RequestException as error:
+ logger.warning(error)
+
+ # When we get invalid JSON (e.g. from a 500 error), it results in a ValueError (bug 1313426)
+ except ValueError as error:
+ logger.warning("Invalid JSON, possible server error: {}".format(error))
+
+ return low_value_tasks
+
+ def is_low_value_task(self, label, project):
+ # cache the low value tasks per project to avoid repeated SETA server queries
+ if project not in self.low_value_tasks:
+ self.low_value_tasks[project] = self.query_low_value_tasks(project)
+ return label in self.low_value_tasks[project]
+
+# create a single instance of this class, and expose its `is_low_value_task`
+# bound method as a module-level function
+is_low_value_task = SETA().is_low_value_task
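+
+# Example (illustrative task label):
+# is_low_value_task('desktop-test-linux64/opt-mochitest-1', 'autoland')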
diff --git a/taskcluster/taskgraph/util/templates.py b/taskcluster/taskgraph/util/templates.py
new file mode 100644
index 000000000..97620fa75
--- /dev/null
+++ b/taskcluster/taskgraph/util/templates.py
@@ -0,0 +1,155 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+
+import pystache
+import yaml
+import copy
+
+# Key used in template inheritance...
+INHERITS_KEY = '$inherits'
+
+
+def merge_to(source, dest):
+ '''
+ Merge dicts and arrays (scalar values are overridden).
+
+ Keys from source override keys from dest, and elements from lists in source
+ are appended to lists in dest.
+
+ :param dict source: to copy from
+ :param dict dest: to copy to (modified in place)
+ '''
+
+ for key, value in source.items():
+ # Override mismatching or empty types
+ if type(value) != type(dest.get(key)): # noqa
+ dest[key] = source[key]
+ continue
+
+ # Merge dict
+ if isinstance(value, dict):
+ merge_to(value, dest[key])
+ continue
+
+ if isinstance(value, list):
+ dest[key] = dest[key] + source[key]
+ continue
+
+ dest[key] = source[key]
+
+ return dest
+
+
+def merge(*objects):
+ '''
+ Merge the given objects, using the semantics described for merge_to, with
+ objects later in the list taking precedence. From an inheritance
+ perspective, "parents" should be listed before "children".
+
+ Returns the result without modifying any arguments.
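+
+ For example (illustrative):
+
+ merge({'a': 1, 'b': [1]}, {'a': 2, 'b': [2]})
+ # -> {'a': 2, 'b': [1, 2]}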
+ '''
+ if len(objects) == 1:
+ return copy.deepcopy(objects[0])
+ return merge_to(objects[-1], merge(*objects[:-1]))
+
+
+class TemplatesException(Exception):
+ pass
+
+
+class Templates():
+ '''
+ The taskcluster integration makes heavy use of yaml to describe tasks;
+ this class handles the loading and rendering.
+ '''
+
+ def __init__(self, root):
+ '''
+ Initialize the template renderer.
+
+ :param str root: Root path from which yaml files are loaded.
+ '''
+ if not root:
+ raise TemplatesException('Root is required')
+
+ if not os.path.isdir(root):
+ raise TemplatesException('Root must be a directory')
+
+ self.root = root
+
+ def _inherits(self, path, obj, properties, seen):
+ blueprint = obj.pop(INHERITS_KEY)
+ seen.add(path)
+
+ # Resolve the path here so we can detect circular references.
+ template = self.resolve_path(blueprint.get('from'))
+ variables = blueprint.get('variables', {})
+
+ # Passed parameters override anything in the task itself.
+ for key in properties:
+ variables[key] = properties[key]
+
+ if not template:
+ msg = '"{}" inheritance template missing'.format(path)
+ raise TemplatesException(msg)
+
+ if template in seen:
+ msg = ('Error while handling "{}" in "{}": circular template '
+ 'inheritance seen\n {}')
+ raise TemplatesException(msg.format(path, template, seen))
+
+ try:
+ out = self.load(template, variables, seen)
+ except TemplatesException as e:
+ msg = 'Error expanding parent ("{}") of "{}"; original error: {}'
+ raise TemplatesException(msg.format(template, path, str(e)))
+
+ # Anything left in obj is merged into final results (and overrides)
+ return merge_to(obj, out)
+
+ def render(self, path, content, parameters, seen):
+ '''
+ Renders a given yaml string.
+
+ :param str path: used to prevent infinite recursion in inheritance.
+ :param str content: Content of the yaml file.
+ :param dict parameters: Parameters for the mustache templates.
+ :param set seen: Seen files (used for inheritance)
+ '''
+ content = pystache.render(content, parameters)
+ result = yaml.load(content)
+
+ # In addition to the usual template logic done by mustache we also
+ # handle special '$inherits' dict keys.
+ if isinstance(result, dict) and INHERITS_KEY in result:
+ return self._inherits(path, result, parameters, seen)
+
+ return result
+
+ def resolve_path(self, path):
+ return os.path.join(self.root, path)
+
+ def load(self, path, parameters=None, seen=None):
+ '''
+ Load and render the given yaml path.
+
+ :param str path: Location of yaml file to load (relative to root).
+ :param dict parameters: Parameters with which to render the yaml file.
+ '''
+ seen = seen or set()
+
+ if not path:
+ raise TemplatesException('path is required')
+
+ path = self.resolve_path(path)
+
+ if not os.path.isfile(path):
+ raise TemplatesException('"{}" is not a file'.format(path))
+
+ with open(path) as f:
+ content = f.read()
+ return self.render(path, content, parameters, seen)
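+
+# A minimal sketch of the '$inherits' syntax handled above (an assumed
+# illustration; the file name and variables are hypothetical):
+#
+# $inherits:
+#   from: 'tasks/base.yml'
+#   variables:
+#     build_name: 'linux64'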
diff --git a/taskcluster/taskgraph/util/time.py b/taskcluster/taskgraph/util/time.py
new file mode 100644
index 000000000..160aaa70c
--- /dev/null
+++ b/taskcluster/taskgraph/util/time.py
@@ -0,0 +1,114 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Python port of the ms.js node module. This is not a direct port; some things
+# are more complicated or less precise, and we lean on timedelta here.
+
+import re
+import datetime
+
+PATTERN = re.compile(
+ r'((?:\d+)?\.?\d+) *([a-z]+)'
+)
+
+
+def seconds(value):
+ return datetime.timedelta(seconds=int(value))
+
+
+def minutes(value):
+ return datetime.timedelta(minutes=int(value))
+
+
+def hours(value):
+ return datetime.timedelta(hours=int(value))
+
+
+def days(value):
+ return datetime.timedelta(days=int(value))
+
+
+def months(value):
+ # See warning in years(), below
+ return datetime.timedelta(days=int(value) * 30)
+
+
+def years(value):
+ # Warning here "years" are vague don't use this for really sensitive date
+ # computation the idea is to give you a absolute amount of time in the
+ # future which is not the same thing as "precisely on this date next year"
+ return datetime.timedelta(days=int(value) * 365)
+
+ALIASES = {}
+ALIASES['seconds'] = ALIASES['second'] = ALIASES['s'] = seconds
+ALIASES['minutes'] = ALIASES['minute'] = ALIASES['min'] = minutes
+ALIASES['hours'] = ALIASES['hour'] = ALIASES['h'] = hours
+ALIASES['days'] = ALIASES['day'] = ALIASES['d'] = days
+ALIASES['months'] = ALIASES['month'] = ALIASES['mo'] = months
+ALIASES['years'] = ALIASES['year'] = ALIASES['y'] = years
+
+
+class InvalidString(Exception):
+ pass
+
+
+class UnknownTimeMeasurement(Exception):
+ pass
+
+
+def value_of(input_str):
+ '''
+ Convert a duration string to a datetime.timedelta.
+ :param str input_str: duration string (e.g. 1d, 2d, 6years, 2 seconds)
+ :returns: datetime.timedelta corresponding to the given duration
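+
+ For example (illustrative), value_of('2 hours') returns
+ datetime.timedelta(hours=2).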
+ '''
+
+ matches = PATTERN.search(input_str)
+
+ if matches is None or len(matches.groups()) < 2:
+ raise InvalidString("'{}' is an invalid duration string".format(input_str))
+
+ value, unit = matches.groups()
+
+ if unit not in ALIASES:
+ raise UnknownTimeMeasurement(
+ '{} is not a valid time measure; use one of {}'.format(
+ unit,
+ sorted(ALIASES.keys())
+ )
+ )
+
+ return ALIASES[unit](value)
+
+
+def json_time_from_now(input_str, now=None, datetime_format=False):
+ '''
+ :param str input_str: Input string (see value_of).
+ :param datetime now: Optionally set the definition of `now`.
+ :param boolean datetime_format: Set `True` to get a `datetime` output.
+ :returns: JSON string representation of a time in the future.
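+
+ For example (illustrative):
+
+ json_time_from_now('1d', now=datetime.datetime(2017, 1, 1))
+ # -> '2017-01-02T00:00:00Z'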
+ '''
+
+ if now is None:
+ now = datetime.datetime.utcnow()
+
+ time = now + value_of(input_str)
+
+ if datetime_format is True:
+ return time
+ else:
+ # Somewhat of a hack, but the JSON schema validator for dates does not
+ # accept ISO dates unless 'Z' (for the UTC timezone) is appended.
+ return time.isoformat() + 'Z'
+
+
+def current_json_time(datetime_format=False):
+ '''
+ :param boolean datetime_format: Set `True` to get a `datetime` output
+ :returns: JSON string representation of the current time.
+ '''
+ if datetime_format is True:
+ return datetime.datetime.utcnow()
+ else:
+ return datetime.datetime.utcnow().isoformat() + 'Z'
diff --git a/taskcluster/taskgraph/util/treeherder.py b/taskcluster/taskgraph/util/treeherder.py
new file mode 100644
index 000000000..e66db582f
--- /dev/null
+++ b/taskcluster/taskgraph/util/treeherder.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+import re
+
+
+def split_symbol(treeherder_symbol):
+ """Split a symbol expressed as grp(sym) into its two parts. If no group is
+ given, the returned group is '?'"""
+ groupSymbol = '?'
+ symbol = treeherder_symbol
+ if '(' in symbol:
+ groupSymbol, symbol = re.match(r'([^(]*)\(([^)]*)\)', symbol).groups()
+ return groupSymbol, symbol
+
+
+def join_symbol(group, symbol):
+ """Perform the reverse of split_symbol, combining the given group and
+ symbol. If the group is '?', then it is omitted."""
+ if group == '?':
+ return symbol
+ return '{}({})'.format(group, symbol)
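+
+# Examples (illustrative):
+# split_symbol('M(1)') -> ('M', '1')
+# split_symbol('Cpp') -> ('?', 'Cpp')
+# join_symbol('M', '1') -> 'M(1)'
+# join_symbol('?', 'Cpp') -> 'Cpp'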
diff --git a/taskcluster/taskgraph/util/yaml.py b/taskcluster/taskgraph/util/yaml.py
new file mode 100644
index 000000000..4e541b775
--- /dev/null
+++ b/taskcluster/taskgraph/util/yaml.py
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import os
+import yaml
+
+
+def load_yaml(path, name):
+ """Convenience function to load a YAML file in the given path. This is
+ useful for loading kind configuration files from the kind path."""
+ filename = os.path.join(path, name)
+ with open(filename, "rb") as f:
+ return yaml.load(f)