author    Matt A. Tobin <mattatobin@localhost.localdomain>    2018-02-02 04:16:08 -0500
committer Matt A. Tobin <mattatobin@localhost.localdomain>    2018-02-02 04:16:08 -0500
commit    5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree      10027f336435511475e392454359edea8e25895d /taskcluster/taskgraph/task
parent    49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'taskcluster/taskgraph/task')
-rw-r--r--  taskcluster/taskgraph/task/__init__.py      0
-rw-r--r--  taskcluster/taskgraph/task/base.py         108
-rw-r--r--  taskcluster/taskgraph/task/docker_image.py 130
-rw-r--r--  taskcluster/taskgraph/task/post_build.py    53
-rw-r--r--  taskcluster/taskgraph/task/signing.py       64
-rw-r--r--  taskcluster/taskgraph/task/test.py         112
-rw-r--r--  taskcluster/taskgraph/task/transform.py    109
7 files changed, 576 insertions, 0 deletions
diff --git a/taskcluster/taskgraph/task/__init__.py b/taskcluster/taskgraph/task/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/taskcluster/taskgraph/task/__init__.py
diff --git a/taskcluster/taskgraph/task/base.py b/taskcluster/taskgraph/task/base.py
new file mode 100644
index 000000000..2d9cbf5d9
--- /dev/null
+++ b/taskcluster/taskgraph/task/base.py
@@ -0,0 +1,108 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import abc
+
+
+class Task(object):
+ """
+ Representation of a task in a TaskGraph. Each Task has, at creation:
+
+ - kind: the name of the task kind
+    - label: the label for this task
+ - attributes: a dictionary of attributes for this task (used for filtering)
+ - task: the task definition (JSON-able dictionary)
+
+ And later, as the task-graph processing proceeds:
+
+    - task_id: TaskCluster taskId under which this task will be created
+    - optimized: true if this task need not be performed
+
+    A kind represents a collection of tasks that share common characteristics,
+    such as all build jobs. Each instance of a kind is initialized with a
+ path from which it draws its task configuration. The instance is free to
+ store as much local state as it needs.
+ """
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, kind, label, attributes, task):
+ self.kind = kind
+ self.label = label
+ self.attributes = attributes
+ self.task = task
+
+ self.task_id = None
+ self.optimized = False
+
+ self.attributes['kind'] = kind
+
+ def __eq__(self, other):
+ return self.kind == other.kind and \
+ self.label == other.label and \
+ self.attributes == other.attributes and \
+ self.task == other.task and \
+ self.task_id == other.task_id
+
+ @classmethod
+ @abc.abstractmethod
+ def load_tasks(cls, kind, path, config, parameters, loaded_tasks):
+ """
+ Load the tasks for a given kind.
+
+        The `kind` is the name of the kind; the configuration for that kind
+        names this class as its implementation.
+
+ The `path` is the path to the configuration directory for the kind. This
+ can be used to load extra data, templates, etc.
+
+ The `parameters` give details on which to base the task generation.
+ See `taskcluster/docs/parameters.rst` for details.
+
+ At the time this method is called, all kinds on which this kind depends
+        (that is, specified in the `kind-dependencies` key in `self.config`)
+ have already loaded their tasks, and those tasks are available in
+ the list `loaded_tasks`.
+
+ The return value is a list of Task instances.
+ """
+
+ @abc.abstractmethod
+ def get_dependencies(self, taskgraph):
+ """
+ Get the set of task labels this task depends on, by querying the full
+ task set, given as `taskgraph`.
+
+ Returns a list of (task_label, dependency_name) pairs describing the
+ dependencies.
+ """
+
+ def optimize(self, params):
+ """
+ Determine whether this task can be optimized, and if it can, what taskId
+ it should be replaced with.
+
+ The return value is a tuple `(optimized, taskId)`. If `optimized` is
+ true, then the task will be optimized (in other words, not included in
+        the task graph). If the second argument is a taskId, then any
+        dependencies on this task will instead depend on that taskId. It is an
+ error to return no taskId for a task on which other tasks depend.
+
+ The default never optimizes.
+ """
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ """
+ Given a data structure as produced by taskgraph.to_json, re-construct
+ the original Task object. This is used to "resume" the task-graph
+ generation process, for example in Action tasks.
+ """
+ return cls(
+ kind=task_dict['attributes']['kind'],
+ label=task_dict['label'],
+ attributes=task_dict['attributes'],
+ task=task_dict['task'])
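
For reference, a minimal sketch (not part of this patch) of how a concrete kind might implement this abstract base class; the kind name and the 'jobs' config key below are hypothetical:

    from taskgraph.task import base

    class ExampleTask(base.Task):
        """Hypothetical kind: one task per entry in a 'jobs' mapping."""

        @classmethod
        def load_tasks(cls, kind, path, config, parameters, loaded_tasks):
            # 'jobs' is an assumed config key for this sketch.
            return [cls(kind, label, attributes={}, task=task_def)
                    for label, task_def in config.get('jobs', {}).items()]

        def get_dependencies(self, taskgraph):
            # This sketch declares no dependencies.
            return []

        # optimize() and from_json() are inherited from base.Task.
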
diff --git a/taskcluster/taskgraph/task/docker_image.py b/taskcluster/taskgraph/task/docker_image.py
new file mode 100644
index 000000000..fd67c4832
--- /dev/null
+++ b/taskcluster/taskgraph/task/docker_image.py
@@ -0,0 +1,130 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import json
+import os
+import urllib2
+
+from . import base
+from taskgraph.util.docker import (
+ docker_image,
+ generate_context_hash,
+ INDEX_PREFIX,
+)
+from taskgraph.util.templates import Templates
+
+logger = logging.getLogger(__name__)
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+INDEX_URL = 'https://index.taskcluster.net/v1/task/{}'
+
+
+class DockerImageTask(base.Task):
+
+ def __init__(self, *args, **kwargs):
+ self.index_paths = kwargs.pop('index_paths')
+ super(DockerImageTask, self).__init__(*args, **kwargs)
+
+ def __eq__(self, other):
+ return super(DockerImageTask, self).__eq__(other) and \
+ self.index_paths == other.index_paths
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, params, loaded_tasks):
+ parameters = {
+ 'pushlog_id': params.get('pushlog_id', 0),
+ 'pushdate': params['moz_build_date'],
+ 'pushtime': params['moz_build_date'][8:],
+ 'year': params['moz_build_date'][0:4],
+ 'month': params['moz_build_date'][4:6],
+ 'day': params['moz_build_date'][6:8],
+ 'project': params['project'],
+ 'docker_image': docker_image,
+ 'base_repository': params['base_repository'] or params['head_repository'],
+ 'head_repository': params['head_repository'],
+ 'head_ref': params['head_ref'] or params['head_rev'],
+ 'head_rev': params['head_rev'],
+ 'owner': params['owner'],
+ 'level': params['level'],
+ 'source': '{repo}file/{rev}/taskcluster/ci/docker-image/image.yml'
+ .format(repo=params['head_repository'], rev=params['head_rev']),
+ 'index_image_prefix': INDEX_PREFIX,
+ 'artifact_path': 'public/image.tar.zst',
+ }
+
+ tasks = []
+ templates = Templates(path)
+ for image_name, image_symbol in config['images'].iteritems():
+ context_path = os.path.join('testing', 'docker', image_name)
+ context_hash = generate_context_hash(GECKO, context_path, image_name)
+
+ image_parameters = dict(parameters)
+ image_parameters['image_name'] = image_name
+ image_parameters['context_hash'] = context_hash
+
+ image_task = templates.load('image.yml', image_parameters)
+ attributes = {'image_name': image_name}
+
+            # unique symbol for each docker image
+ if 'extra' in image_task['task']:
+ image_task['task']['extra']['treeherder']['symbol'] = image_symbol
+
+            # As an optimization, if the context hash exists for a higher level, that
+            # image task ID will be used. The reasoning behind this is that eventually
+            # everything ends up on level 3 at some point. If most tasks use a common
+            # image for a given context hash, a worker within Taskcluster does not
+            # need to store the same image per branch.
+ index_paths = ['{}.level-{}.{}.hash.{}'.format(
+ INDEX_PREFIX, level, image_name, context_hash)
+ for level in range(int(params['level']), 4)]
+
+ tasks.append(cls(kind, 'build-docker-image-' + image_name,
+ task=image_task['task'], attributes=attributes,
+ index_paths=index_paths))
+
+ return tasks
+
+ def get_dependencies(self, taskgraph):
+ return []
+
+ def optimize(self, params):
+ for index_path in self.index_paths:
+ try:
+ url = INDEX_URL.format(index_path)
+ existing_task = json.load(urllib2.urlopen(url))
+                # Only return the task ID if the artifact exists for the indexed
+                # task; otherwise, continue looking at the other branches. Trying
+                # other branches covers the case where mozilla-central has an
+                # expired artifact but 'project' does not. Only return no task ID
+                # once all branches have been tried.
+ request = urllib2.Request(
+ ARTIFACT_URL.format(existing_task['taskId'], 'public/image.tar.zst'))
+ request.get_method = lambda: 'HEAD'
+ urllib2.urlopen(request)
+
+ # HEAD success on the artifact is enough
+ return True, existing_task['taskId']
+ except urllib2.HTTPError:
+ pass
+
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ # Generating index_paths for optimization
+ imgMeta = task_dict['task']['extra']['imageMeta']
+ image_name = imgMeta['imageName']
+ context_hash = imgMeta['contextHash']
+ index_paths = ['{}.level-{}.{}.hash.{}'.format(
+ INDEX_PREFIX, level, image_name, context_hash)
+ for level in range(int(imgMeta['level']), 4)]
+ docker_image_task = cls(kind='docker-image',
+ label=task_dict['label'],
+ attributes=task_dict['attributes'],
+ task=task_dict['task'],
+ index_paths=index_paths)
+ return docker_image_task
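
To make the index scheme concrete, here is a small sketch of how index_paths expands for a hypothetical image; the INDEX_PREFIX value is assumed (the real one comes from taskgraph.util.docker):

    # Hypothetical values, for illustration only.
    INDEX_PREFIX = 'docker.images.v1'   # assumed prefix
    image_name = 'desktop-build'
    context_hash = 'abc123'
    level = 1                           # e.g. a try push

    index_paths = ['{}.level-{}.{}.hash.{}'.format(INDEX_PREFIX, l, image_name, context_hash)
                   for l in range(level, 4)]
    # ['docker.images.v1.level-1.desktop-build.hash.abc123',
    #  'docker.images.v1.level-2.desktop-build.hash.abc123',
    #  'docker.images.v1.level-3.desktop-build.hash.abc123']

optimize() then issues a HEAD request for the image artifact behind each of these index paths in order, reusing the first indexed image whose artifact still exists.
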
diff --git a/taskcluster/taskgraph/task/post_build.py b/taskcluster/taskgraph/task/post_build.py
new file mode 100644
index 000000000..09c76c44a
--- /dev/null
+++ b/taskcluster/taskgraph/task/post_build.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public License,
+# v. 2.0. If a copy of the MPL was not distributed with this file, You can
+# obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import copy
+import logging
+
+from . import transform
+from ..util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+
+class PostBuildTask(transform.TransformTask):
+ """
+ A task implementing a post-build job. These depend on jobs and perform
+ various followup tasks after a build has completed.
+
+ The `only-for-build-platforms` kind configuration, if specified, will limit
+ the build platforms for which a post-build task will be created.
+
+    The `job-template` kind configuration points to a YAML file which will
+    be used to create the input to the transforms. Each input will have the
+    keys `build-label` (the label for the build task) and `build-platform`
+    (its platform) added to it.
+ """
+
+ @classmethod
+ def get_inputs(cls, kind, path, config, params, loaded_tasks):
+ if config.get('kind-dependencies', []) != ["build"]:
+ raise Exception("PostBuildTask kinds must depend on builds")
+
+ only_platforms = config.get('only-for-build-platforms')
+ prototype = load_yaml(path, config.get('job-template'))
+
+ for task in loaded_tasks:
+ if task.kind != 'build':
+ continue
+
+ build_platform = task.attributes.get('build_platform')
+ build_type = task.attributes.get('build_type')
+ if not build_platform or not build_type:
+ continue
+ platform = "{}/{}".format(build_platform, build_type)
+ if only_platforms and platform not in only_platforms:
+ continue
+
+ post_task = copy.deepcopy(prototype)
+ post_task['build-label'] = task.label
+ post_task['build-platform'] = platform
+ yield post_task
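
A short sketch, with made-up data, of what this generator yields for a single matching build task:

    import copy

    prototype = {'description': 'upload symbols'}   # stands in for the job-template YAML
    only_platforms = ['linux64/opt']

    class FakeBuildTask(object):
        kind = 'build'
        label = 'build-linux64/opt'
        attributes = {'build_platform': 'linux64', 'build_type': 'opt'}

    inputs = []
    for task in [FakeBuildTask()]:
        platform = '{}/{}'.format(task.attributes['build_platform'],
                                  task.attributes['build_type'])
        if only_platforms and platform not in only_platforms:
            continue
        post_task = copy.deepcopy(prototype)
        post_task['build-label'] = task.label
        post_task['build-platform'] = platform
        inputs.append(post_task)

    assert inputs == [{'description': 'upload symbols',
                       'build-label': 'build-linux64/opt',
                       'build-platform': 'linux64/opt'}]
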
diff --git a/taskcluster/taskgraph/task/signing.py b/taskcluster/taskgraph/task/signing.py
new file mode 100644
index 000000000..a2a9ae3d6
--- /dev/null
+++ b/taskcluster/taskgraph/task/signing.py
@@ -0,0 +1,64 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import os
+
+from . import base
+from taskgraph.util.templates import Templates
+
+
+logger = logging.getLogger(__name__)
+GECKO = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))
+ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
+INDEX_URL = 'https://index.taskcluster.net/v1/task/{}'
+
+
+class SigningTask(base.Task):
+
+ def __init__(self, kind, name, task, attributes):
+ self.unsigned_artifact_label = task['unsigned-task']['label']
+ super(SigningTask, self).__init__(kind, name, task=task['task'],
+ attributes=attributes)
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, params, loaded_tasks):
+ root = os.path.abspath(path)
+
+ tasks = []
+ for filename in config.get('jobs-from', []):
+ templates = Templates(root)
+ jobs = templates.load(filename, {})
+
+ for name, job in jobs.iteritems():
+ for artifact in job['unsigned-task']['artifacts']:
+ url = ARTIFACT_URL.format('<{}>'.format('unsigned-artifact'), artifact)
+ job['task']['payload']['unsignedArtifacts'].append({
+ 'task-reference': url
+ })
+ attributes = job.setdefault('attributes', {})
+ attributes.update({'kind': 'signing'})
+ tasks.append(cls(kind, name, job, attributes=attributes))
+
+ return tasks
+
+ def get_dependencies(self, taskgraph):
+ return [(self.unsigned_artifact_label, 'unsigned-artifact')]
+
+ def optimize(self, params):
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ unsigned_task_label = task_dict['dependencies']['unsigned-artifact']
+ task_dict['unsigned-task'] = {
+ 'label': unsigned_task_label
+ }
+ signing_task = cls(kind='build-signing',
+ name=task_dict['label'],
+ attributes=task_dict['attributes'],
+ task=task_dict)
+ return signing_task
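
A sketch of the task-reference mechanism used above; the artifact name is hypothetical:

    ARTIFACT_URL = 'https://queue.taskcluster.net/v1/task/{}/artifacts/{}'
    artifact = 'public/build/target.tar.bz2'   # hypothetical unsigned artifact

    url = ARTIFACT_URL.format('<{}>'.format('unsigned-artifact'), artifact)
    assert url == ('https://queue.taskcluster.net/v1/task/<unsigned-artifact>'
                   '/artifacts/public/build/target.tar.bz2')

Wrapping the URL in {'task-reference': url} tells the task-graph machinery to replace '<unsigned-artifact>' with the taskId of the dependency of that name (see get_dependencies) when the task is submitted.
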
diff --git a/taskcluster/taskgraph/task/test.py b/taskcluster/taskgraph/task/test.py
new file mode 100644
index 000000000..928f32a5a
--- /dev/null
+++ b/taskcluster/taskgraph/task/test.py
@@ -0,0 +1,112 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import copy
+import logging
+
+from . import transform
+from ..util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+
+class TestTask(transform.TransformTask):
+ """
+ A task implementing a Gecko test.
+ """
+
+ @classmethod
+ def get_inputs(cls, kind, path, config, params, loaded_tasks):
+
+ # the kind on which this one depends
+ if len(config.get('kind-dependencies', [])) != 1:
+ raise Exception("TestTask kinds must have exactly one item in kind-dependencies")
+ dep_kind = config['kind-dependencies'][0]
+
+ # get build tasks, keyed by build platform
+ builds_by_platform = cls.get_builds_by_platform(dep_kind, loaded_tasks)
+
+ # get the test platforms for those build tasks
+ test_platforms_cfg = load_yaml(path, 'test-platforms.yml')
+ test_platforms = cls.get_test_platforms(test_platforms_cfg, builds_by_platform)
+
+ # expand the test sets for each of those platforms
+ test_sets_cfg = load_yaml(path, 'test-sets.yml')
+ test_platforms = cls.expand_tests(test_sets_cfg, test_platforms)
+
+ # load the test descriptions
+ test_descriptions = load_yaml(path, 'tests.yml')
+
+ # generate all tests for all test platforms
+ for test_platform_name, test_platform in test_platforms.iteritems():
+ for test_name in test_platform['test-names']:
+ test = copy.deepcopy(test_descriptions[test_name])
+ test['build-platform'] = test_platform['build-platform']
+ test['test-platform'] = test_platform_name
+ test['build-label'] = test_platform['build-label']
+ test['test-name'] = test_name
+
+ logger.debug("Generating tasks for {} test {} on platform {}".format(
+ kind, test_name, test['test-platform']))
+ yield test
+
+ @classmethod
+ def get_builds_by_platform(cls, dep_kind, loaded_tasks):
+ """Find the build tasks on which tests will depend, keyed by
+ platform/type. Returns a dictionary mapping build platform to task
+ label."""
+ builds_by_platform = {}
+ for task in loaded_tasks:
+ if task.kind != dep_kind:
+ continue
+
+ build_platform = task.attributes.get('build_platform')
+ build_type = task.attributes.get('build_type')
+ if not build_platform or not build_type:
+ continue
+ platform = "{}/{}".format(build_platform, build_type)
+ if platform in builds_by_platform:
+ raise Exception("multiple build jobs for " + platform)
+ builds_by_platform[platform] = task.label
+ return builds_by_platform
+
+ @classmethod
+ def get_test_platforms(cls, test_platforms_cfg, builds_by_platform):
+ """Get the test platforms for which test tasks should be generated,
+ based on the available build platforms. Returns a dictionary mapping
+ test platform to {test-set, build-platform, build-label}."""
+ test_platforms = {}
+ for test_platform, cfg in test_platforms_cfg.iteritems():
+ build_platform = cfg['build-platform']
+ if build_platform not in builds_by_platform:
+ logger.warning(
+ "No build task with platform {}; ignoring test platform {}".format(
+ build_platform, test_platform))
+ continue
+ test_platforms[test_platform] = {
+ 'test-set': cfg['test-set'],
+ 'build-platform': build_platform,
+ 'build-label': builds_by_platform[build_platform],
+ }
+ return test_platforms
+
+ @classmethod
+ def expand_tests(cls, test_sets_cfg, test_platforms):
+ """Expand the test sets in `test_platforms` out to sets of test names.
+ Returns a dictionary like `get_test_platforms`, with an additional
+ `test-names` key for each test platform, containing a set of test
+ names."""
+ rv = {}
+ for test_platform, cfg in test_platforms.iteritems():
+ test_set = cfg['test-set']
+ if test_set not in test_sets_cfg:
+ raise Exception(
+ "Test set '{}' for test platform {} is not defined".format(
+ test_set, test_platform))
+ test_names = test_sets_cfg[test_set]
+ rv[test_platform] = cfg.copy()
+ rv[test_platform]['test-names'] = test_names
+ return rv
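
The expansion performed by expand_tests can be illustrated with made-up configuration data:

    test_platforms = {
        'linux64/opt': {
            'test-set': 'all-tests',            # from test-platforms.yml
            'build-platform': 'linux64/opt',
            'build-label': 'build-linux64/opt',
        },
    }
    test_sets_cfg = {                           # from test-sets.yml
        'all-tests': ['mochitest', 'reftest', 'xpcshell'],
    }

    rv = {}
    for test_platform, cfg in test_platforms.items():
        entry = cfg.copy()
        entry['test-names'] = test_sets_cfg[cfg['test-set']]
        rv[test_platform] = entry

    assert rv['linux64/opt']['test-names'] == ['mochitest', 'reftest', 'xpcshell']
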
diff --git a/taskcluster/taskgraph/task/transform.py b/taskcluster/taskgraph/task/transform.py
new file mode 100644
index 000000000..8183254a0
--- /dev/null
+++ b/taskcluster/taskgraph/task/transform.py
@@ -0,0 +1,109 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import itertools
+
+from . import base
+from .. import files_changed
+from ..util.python_path import find_object
+from ..util.templates import merge
+from ..util.yaml import load_yaml
+from ..util.seta import is_low_value_task
+
+from ..transforms.base import TransformSequence, TransformConfig
+
+logger = logging.getLogger(__name__)
+
+
+class TransformTask(base.Task):
+ """
+ Tasks of this class are generated by applying transformations to a sequence
+ of input entities. By default, it gets those inputs from YAML data in the
+ kind directory, but subclasses may override `get_inputs` to produce them in
+ some other way.
+ """
+
+ @classmethod
+ def get_inputs(cls, kind, path, config, params, loaded_tasks):
+ """
+ Get the input elements that will be transformed into tasks. The
+ elements themselves are free-form, and become the input to the first
+ transform.
+
+ By default, this reads jobs from the `jobs` key, or from yaml files
+ named by `jobs-from`. The entities are read from mappings, and the
+ keys to those mappings are added in the `name` key of each entity.
+
+ If there is a `job-defaults` config, then every job is merged with it.
+ This provides a simple way to set default values for all jobs of a
+ kind. More complex defaults should be implemented with custom
+ transforms.
+
+ This method can be overridden in subclasses that need to perform more
+ complex calculations to generate the list of inputs.
+ """
+ def jobs():
+ defaults = config.get('job-defaults')
+ jobs = config.get('jobs', {}).iteritems()
+ jobs_from = itertools.chain.from_iterable(
+ load_yaml(path, filename).iteritems()
+ for filename in config.get('jobs-from', {}))
+ for name, job in itertools.chain(jobs, jobs_from):
+ if defaults:
+ job = merge(defaults, job)
+ yield name, job
+
+ for name, job in jobs():
+ job['name'] = name
+ logger.debug("Generating tasks for {} {}".format(kind, name))
+ yield job
+
+ @classmethod
+ def load_tasks(cls, kind, path, config, params, loaded_tasks):
+ inputs = cls.get_inputs(kind, path, config, params, loaded_tasks)
+
+ transforms = TransformSequence()
+ for xform_path in config['transforms']:
+ transform = find_object(xform_path)
+ transforms.add(transform)
+
+ # perform the transformations
+ trans_config = TransformConfig(kind, path, config, params)
+ tasks = [cls(kind, t) for t in transforms(trans_config, inputs)]
+ return tasks
+
+ def __init__(self, kind, task):
+ self.dependencies = task['dependencies']
+ self.when = task['when']
+ super(TransformTask, self).__init__(kind, task['label'],
+ task['attributes'], task['task'])
+
+ def get_dependencies(self, taskgraph):
+ return [(label, name) for name, label in self.dependencies.items()]
+
+ def optimize(self, params):
+ if 'files-changed' in self.when:
+ changed = files_changed.check(
+ params, self.when['files-changed'])
+ if not changed:
+ logger.debug('no files found matching a pattern in `when.files-changed` for ' +
+ self.label)
+ return True, None
+
+        # We want to return (False, None) when this is a high-value task, so
+        # that it is not optimized away; otherwise, return (True, None).
+ if is_low_value_task(self.label, params.get('project')):
+ # Always optimize away low-value tasks
+ return True, None
+ else:
+ return False, None
+
+ @classmethod
+ def from_json(cls, task_dict):
+ # when reading back from JSON, we lose the "when" information
+ task_dict['when'] = {}
+ return cls(task_dict['attributes']['kind'], task_dict)
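
Finally, the job-defaults behavior described in get_inputs amounts to a recursive dictionary merge. A self-contained sketch; the merge below only approximates taskgraph.util.templates.merge, and the data is hypothetical:

    def merge(*dicts):
        # Later dicts win; nested dictionaries are merged recursively.
        out = {}
        for d in dicts:
            for key, value in d.items():
                if isinstance(value, dict) and isinstance(out.get(key), dict):
                    out[key] = merge(out[key], value)
                else:
                    out[key] = value
        return out

    defaults = {'worker-type': 'test-worker', 'treeherder': {'tier': 2}}
    job = {'treeherder': {'symbol': 'X'}}

    assert merge(defaults, job) == {
        'worker-type': 'test-worker',
        'treeherder': {'tier': 2, 'symbol': 'X'},
    }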