summaryrefslogtreecommitdiffstats
path: root/testing/tools
diff options
context:
space:
mode:
Diffstat (limited to 'testing/tools')
-rw-r--r--testing/tools/autotry/__init__.py0
-rw-r--r--testing/tools/autotry/autotry.py586
-rw-r--r--testing/tools/fileid/linux_fileid.cpp41
-rw-r--r--testing/tools/fileid/mac_fileid.cpp52
-rw-r--r--testing/tools/fileid/moz.build34
-rw-r--r--testing/tools/fileid/win_fileid.cpp90
-rwxr-xr-xtesting/tools/grabber/getpages.sh54
-rw-r--r--testing/tools/iceserver/iceserver.py759
-rw-r--r--testing/tools/mach_test_package_bootstrap.py194
-rw-r--r--testing/tools/minidumpwriter/minidumpwriter.cpp59
-rw-r--r--testing/tools/minidumpwriter/moz.build19
-rw-r--r--testing/tools/proxyserver/proxyserver.py269
-rw-r--r--testing/tools/screenshot/gdk-screenshot.cpp169
-rw-r--r--testing/tools/screenshot/moz.build26
-rw-r--r--testing/tools/screenshot/win32-screenshot.cpp116
-rw-r--r--testing/tools/websocketprocessbridge/websocketprocessbridge.py106
-rw-r--r--testing/tools/websocketprocessbridge/websocketprocessbridge_requirements.txt13
17 files changed, 2587 insertions, 0 deletions
diff --git a/testing/tools/autotry/__init__.py b/testing/tools/autotry/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/tools/autotry/__init__.py
diff --git a/testing/tools/autotry/autotry.py b/testing/tools/autotry/autotry.py
new file mode 100644
index 000000000..7b0350b61
--- /dev/null
+++ b/testing/tools/autotry/autotry.py
@@ -0,0 +1,586 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import itertools
+import os
+import re
+import subprocess
+import sys
+import which
+
+from collections import defaultdict
+
+import ConfigParser
+
+
+def arg_parser():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('paths', nargs='*', help='Paths to search for tests to run on try.')
+ parser.add_argument('-b', '--build', dest='builds', default='do',
+ help='Build types to run (d for debug, o for optimized).')
+ parser.add_argument('-p', '--platform', dest='platforms', action='append',
+ help='Platforms to run (required if not found in the environment as AUTOTRY_PLATFORM_HINT).')
+ parser.add_argument('-u', '--unittests', dest='tests', action='append',
+ help='Test suites to run in their entirety.')
+ parser.add_argument('-t', '--talos', dest='talos', action='append',
+ help='Talos suites to run.')
+ parser.add_argument('--tag', dest='tags', action='append',
+ help='Restrict tests to the given tag (may be specified multiple times).')
+ parser.add_argument('--and', action='store_true', dest='intersection',
+ help='When -u and paths are supplied run only the intersection of the tests specified by the two arguments.')
+ parser.add_argument('--no-push', dest='push', action='store_false',
+ help='Do not push to try as a result of running this command (if '
+ 'specified this command will only print calculated try '
+ 'syntax and selection info).')
+ parser.add_argument('--save', dest='save', action='store',
+ help='Save the command line arguments for future use with --preset.')
+ parser.add_argument('--preset', dest='load', action='store',
+ help='Load a saved set of arguments. Additional arguments will override saved ones.')
+ parser.add_argument('--list', action='store_true',
+ help='List all saved try strings')
+ parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False,
+ help='Print detailed information about the resulting test selection '
+ 'and commands performed.')
+ for arg, opts in AutoTry.pass_through_arguments.items():
+ parser.add_argument(arg, **opts)
+ return parser
+
+class TryArgumentTokenizer(object):
+ symbols = [("seperator", ","),
+ ("list_start", "\["),
+ ("list_end", "\]"),
+ ("item", "([^,\[\]\s][^,\[\]]+)"),
+ ("space", "\s+")]
+ token_re = re.compile("|".join("(?P<%s>%s)" % item for item in symbols))
+
+ def tokenize(self, data):
+ for match in self.token_re.finditer(data):
+ symbol = match.lastgroup
+ data = match.group(symbol)
+ if symbol == "space":
+ pass
+ else:
+ yield symbol, data
+
+class TryArgumentParser(object):
+ """Simple three-state parser for handling expressions
+    of the form "foo[sub item, another], bar,baz". This takes
+ input from the TryArgumentTokenizer and runs through a small
+ state machine, returning a dictionary of {top-level-item:[sub_items]}
+ i.e. the above would result in
+ {"foo":["sub item", "another"], "bar": [], "baz": []}
+ In the case of invalid input a ValueError is raised."""
+
+ EOF = object()
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.tokens = None
+ self.current_item = None
+ self.data = {}
+ self.token = None
+ self.state = None
+
+ def parse(self, tokens):
+ self.reset()
+ self.tokens = tokens
+ self.consume()
+ self.state = self.item_state
+ while self.token[0] != self.EOF:
+ self.state()
+ return self.data
+
+ def consume(self):
+ try:
+ self.token = self.tokens.next()
+ except StopIteration:
+ self.token = (self.EOF, None)
+
+ def expect(self, *types):
+ if self.token[0] not in types:
+ raise ValueError("Error parsing try string, unexpected %s" % (self.token[0]))
+
+ def item_state(self):
+ self.expect("item")
+ value = self.token[1].strip()
+ if value not in self.data:
+ self.data[value] = []
+ self.current_item = value
+ self.consume()
+ if self.token[0] == "seperator":
+ self.consume()
+ elif self.token[0] == "list_start":
+ self.consume()
+ self.state = self.subitem_state
+ elif self.token[0] == self.EOF:
+ pass
+ else:
+ raise ValueError
+
+ def subitem_state(self):
+ self.expect("item")
+ value = self.token[1].strip()
+ self.data[self.current_item].append(value)
+ self.consume()
+ if self.token[0] == "seperator":
+ self.consume()
+ elif self.token[0] == "list_end":
+ self.consume()
+ self.state = self.after_list_end_state
+ else:
+ raise ValueError
+
+ def after_list_end_state(self):
+ self.expect("seperator")
+ self.consume()
+ self.state = self.item_state
+
+def parse_arg(arg):
+ tokenizer = TryArgumentTokenizer()
+ parser = TryArgumentParser()
+ return parser.parse(tokenizer.tokenize(arg))
+
+class AutoTry(object):
+
+    # Maps from flavors to the job names needed to run that flavor
+ flavor_jobs = {
+ 'mochitest': ['mochitest-1', 'mochitest-e10s-1'],
+ 'xpcshell': ['xpcshell'],
+ 'chrome': ['mochitest-o'],
+ 'browser-chrome': ['mochitest-browser-chrome-1',
+ 'mochitest-e10s-browser-chrome-1'],
+ 'devtools-chrome': ['mochitest-devtools-chrome-1',
+ 'mochitest-e10s-devtools-chrome-1'],
+ 'crashtest': ['crashtest', 'crashtest-e10s'],
+ 'reftest': ['reftest', 'reftest-e10s'],
+ 'web-platform-tests': ['web-platform-tests-1'],
+ }
+
+ flavor_suites = {
+ "mochitest": "mochitests",
+ "xpcshell": "xpcshell",
+ "chrome": "mochitest-o",
+ "browser-chrome": "mochitest-bc",
+ "devtools-chrome": "mochitest-dt",
+ "crashtest": "crashtest",
+ "reftest": "reftest",
+ "web-platform-tests": "web-platform-tests",
+ }
+
+ compiled_suites = [
+ "cppunit",
+ "gtest",
+ "jittest",
+ ]
+
+ common_suites = [
+ "cppunit",
+ "crashtest",
+ "firefox-ui-functional",
+ "gtest",
+ "jittest",
+ "jsreftest",
+ "marionette",
+ "marionette-e10s",
+ "media-tests",
+ "mochitests",
+ "reftest",
+ "web-platform-tests",
+ "xpcshell",
+ ]
+
+ # Arguments we will accept on the command line and pass through to try
+ # syntax with no further intervention. The set is taken from
+ # http://trychooser.pub.build.mozilla.org with a few additions.
+ #
+ # Note that the meaning of store_false and store_true arguments is
+ # not preserved here, as we're only using these to echo the literal
+ # arguments to another consumer. Specifying either store_false or
+ # store_true here will have an equivalent effect.
+ pass_through_arguments = {
+ '--rebuild': {
+ 'action': 'store',
+ 'dest': 'rebuild',
+ 'help': 'Re-trigger all test jobs (up to 20 times)',
+ },
+ '--rebuild-talos': {
+ 'action': 'store',
+ 'dest': 'rebuild_talos',
+ 'help': 'Re-trigger all talos jobs',
+ },
+ '--interactive': {
+ 'action': 'store_true',
+ 'dest': 'interactive',
+ 'help': 'Allow ssh-like access to running test containers',
+ },
+ '--no-retry': {
+ 'action': 'store_true',
+ 'dest': 'no_retry',
+ 'help': 'Do not retrigger failed tests',
+ },
+ '--setenv': {
+ 'action': 'append',
+ 'dest': 'setenv',
+            'help': 'Set the corresponding variable in the test environment for '
+ 'applicable harnesses.',
+ },
+ '-f': {
+ 'action': 'store_true',
+ 'dest': 'failure_emails',
+ 'help': 'Request failure emails only',
+ },
+ '--failure-emails': {
+ 'action': 'store_true',
+ 'dest': 'failure_emails',
+ 'help': 'Request failure emails only',
+ },
+ '-e': {
+ 'action': 'store_true',
+ 'dest': 'all_emails',
+ 'help': 'Request all emails',
+ },
+ '--all-emails': {
+ 'action': 'store_true',
+ 'dest': 'all_emails',
+ 'help': 'Request all emails',
+ },
+ '--artifact': {
+ 'action': 'store_true',
+ 'dest': 'artifact',
+ 'help': 'Force artifact builds where possible.',
+ }
+ }
+
+ def __init__(self, topsrcdir, resolver_func, mach_context):
+ self.topsrcdir = topsrcdir
+ self._resolver_func = resolver_func
+ self._resolver = None
+ self.mach_context = mach_context
+
+ if os.path.exists(os.path.join(self.topsrcdir, '.hg')):
+ self._use_git = False
+ else:
+ self._use_git = True
+
+ @property
+ def resolver(self):
+ if self._resolver is None:
+ self._resolver = self._resolver_func()
+ return self._resolver
+
+ @property
+ def config_path(self):
+ return os.path.join(self.mach_context.state_dir, "autotry.ini")
+
+ def load_config(self, name):
+ config = ConfigParser.RawConfigParser()
+ success = config.read([self.config_path])
+ if not success:
+ return None
+
+ try:
+ data = config.get("try", name)
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return None
+
+ kwargs = vars(arg_parser().parse_args(self.split_try_string(data)))
+
+ return kwargs
+
+ def list_presets(self):
+ config = ConfigParser.RawConfigParser()
+ success = config.read([self.config_path])
+
+ data = []
+ if success:
+ try:
+ data = config.items("try")
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ pass
+
+ if not data:
+ print("No presets found")
+
+ for name, try_string in data:
+ print("%s: %s" % (name, try_string))
+
+ def split_try_string(self, data):
+ return re.findall(r'(?:\[.*?\]|\S)+', data)
+
+ def save_config(self, name, data):
+ assert data.startswith("try: ")
+ data = data[len("try: "):]
+
+ parser = ConfigParser.RawConfigParser()
+ parser.read([self.config_path])
+
+ if not parser.has_section("try"):
+ parser.add_section("try")
+
+ parser.set("try", name, data)
+
+ with open(self.config_path, "w") as f:
+ parser.write(f)
+
+ def paths_by_flavor(self, paths=None, tags=None):
+ paths_by_flavor = defaultdict(set)
+
+ if not (paths or tags):
+ return dict(paths_by_flavor)
+
+ tests = list(self.resolver.resolve_tests(paths=paths,
+ tags=tags))
+
+ for t in tests:
+ if t['flavor'] in self.flavor_suites:
+ flavor = t['flavor']
+ if 'subsuite' in t and t['subsuite'] == 'devtools':
+ flavor = 'devtools-chrome'
+
+ if flavor in ['crashtest', 'reftest']:
+ manifest_relpath = os.path.relpath(t['manifest'], self.topsrcdir)
+ paths_by_flavor[flavor].add(os.path.dirname(manifest_relpath))
+ elif 'dir_relpath' in t:
+ paths_by_flavor[flavor].add(t['dir_relpath'])
+ else:
+ file_relpath = os.path.relpath(t['path'], self.topsrcdir)
+ dir_relpath = os.path.dirname(file_relpath)
+ paths_by_flavor[flavor].add(dir_relpath)
+
+ for flavor, path_set in paths_by_flavor.items():
+ paths_by_flavor[flavor] = self.deduplicate_prefixes(path_set, paths)
+
+ return dict(paths_by_flavor)
+
+ def deduplicate_prefixes(self, path_set, input_paths):
+ # Removes paths redundant to test selection in the given path set.
+ # If a path was passed on the commandline that is the prefix of a
+ # path in our set, we only need to include the specified prefix to
+ # run the intended tests (every test in "layout/base" will run if
+ # "layout" is passed to the reftest harness).
+ removals = set()
+ additions = set()
+
+ for path in path_set:
+ full_path = path
+ while path:
+ path, _ = os.path.split(path)
+ if path in input_paths:
+ removals.add(full_path)
+ additions.add(path)
+
+ return additions | (path_set - removals)
+
+ def remove_duplicates(self, paths_by_flavor, tests):
+ rv = {}
+ for item in paths_by_flavor:
+ if self.flavor_suites[item] not in tests:
+ rv[item] = paths_by_flavor[item].copy()
+ return rv
+
+ def calc_try_syntax(self, platforms, tests, talos, builds, paths_by_flavor, tags,
+ extras, intersection):
+ parts = ["try:", "-b", builds, "-p", ",".join(platforms)]
+
+ suites = tests if not intersection else {}
+ paths = set()
+ for flavor, flavor_tests in paths_by_flavor.iteritems():
+ suite = self.flavor_suites[flavor]
+ if suite not in suites and (not intersection or suite in tests):
+ for job_name in self.flavor_jobs[flavor]:
+ for test in flavor_tests:
+ paths.add("%s:%s" % (flavor, test))
+ suites[job_name] = tests.get(suite, [])
+
+ if not suites:
+ raise ValueError("No tests found matching filters")
+
+ if extras.get('artifact'):
+ rejected = []
+ for suite in suites.keys():
+ if any([suite.startswith(c) for c in self.compiled_suites]):
+ rejected.append(suite)
+ if rejected:
+ raise ValueError("You can't run {} with "
+ "--artifact option.".format(', '.join(rejected)))
+
+ parts.append("-u")
+ parts.append(",".join("%s%s" % (k, "[%s]" % ",".join(v) if v else "")
+ for k,v in sorted(suites.items())) if suites else "none")
+
+ parts.append("-t")
+ parts.append(",".join("%s%s" % (k, "[%s]" % ",".join(v) if v else "")
+ for k,v in sorted(talos.items())) if talos else "none")
+
+ if tags:
+ parts.append(' '.join('--tag %s' % t for t in tags))
+
+ if paths:
+ parts.append("--try-test-paths %s" % " ".join(sorted(paths)))
+
+ args_by_dest = {v['dest']: k for k, v in AutoTry.pass_through_arguments.items()}
+ for dest, value in extras.iteritems():
+ assert dest in args_by_dest
+ arg = args_by_dest[dest]
+ action = AutoTry.pass_through_arguments[arg]['action']
+ if action == 'store':
+ parts.append(arg)
+ parts.append(value)
+ if action == 'append':
+ for e in value:
+ parts.append(arg)
+ parts.append(e)
+ if action in ('store_true', 'store_false'):
+ parts.append(arg)
+
+ try_syntax = " ".join(parts)
+ if extras.get('artifact') and 'all' in suites.keys():
+ message = ('You asked for |-u all| with |--artifact| but compiled-code tests ({tests})'
+ ' can\'t run against an artifact build. Try listing the suites you want'
+ ' instead. For example, this syntax covers most suites:\n{try_syntax}')
+ string_format = {
+ 'tests': ','.join(self.compiled_suites),
+ 'try_syntax': try_syntax.replace(
+ '-u all',
+ '-u ' + ','.join(sorted(set(self.common_suites) - set(self.compiled_suites)))
+ )
+ }
+ raise ValueError(message.format(**string_format))
+
+ return try_syntax
+
+ def _run_git(self, *args):
+ args = ['git'] + list(args)
+ ret = subprocess.call(args)
+ if ret:
+ print('ERROR git command %s returned %s' %
+ (args, ret))
+ sys.exit(1)
+
+ def _git_push_to_try(self, msg):
+ self._run_git('commit', '--allow-empty', '-m', msg)
+ try:
+ self._run_git('push', 'hg::ssh://hg.mozilla.org/try',
+ '+HEAD:refs/heads/branches/default/tip')
+ finally:
+ self._run_git('reset', 'HEAD~')
+
+ def _git_find_changed_files(self):
+ # This finds the files changed on the current branch based on the
+        # diff of the current branch with its merge-base with other branches.
+ try:
+ args = ['git', 'rev-parse', 'HEAD']
+ current_branch = subprocess.check_output(args).strip()
+ args = ['git', 'for-each-ref', 'refs/heads', 'refs/remotes',
+ '--format=%(objectname)']
+ all_branches = subprocess.check_output(args).splitlines()
+ other_branches = set(all_branches) - set([current_branch])
+ args = ['git', 'merge-base', 'HEAD'] + list(other_branches)
+ base_commit = subprocess.check_output(args).strip()
+ args = ['git', 'diff', '--name-only', '-z', 'HEAD', base_commit]
+ return subprocess.check_output(args).strip('\0').split('\0')
+ except subprocess.CalledProcessError as e:
+ print('Failed while determining files changed on this branch')
+            print('Failed while running: %s' % args)
+ print(e.output)
+ sys.exit(1)
+
+ def _hg_find_changed_files(self):
+ hg_args = [
+ 'hg', 'log', '-r',
+ '::. and not public()',
+ '--template',
+ '{join(files, "\n")}\n',
+ ]
+ try:
+ return subprocess.check_output(hg_args).splitlines()
+ except subprocess.CalledProcessError as e:
+ print('Failed while finding files changed since the last '
+ 'public ancestor')
+            print('Failed while running: %s' % hg_args)
+ print(e.output)
+ sys.exit(1)
+
+ def find_changed_files(self):
+ """Finds files changed in a local source tree.
+
+ For hg, changes since the last public ancestor of '.' are
+ considered. For git, changes in the current branch are considered.
+ """
+ if self._use_git:
+ return self._git_find_changed_files()
+ return self._hg_find_changed_files()
+
+ def push_to_try(self, msg, verbose):
+ if not self._use_git:
+ try:
+ hg_args = ['hg', 'push-to-try', '-m', msg]
+ subprocess.check_call(hg_args, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ print('ERROR hg command %s returned %s' % (hg_args, e.returncode))
+ print('\nmach failed to push to try. There may be a problem '
+ 'with your ssh key, or another issue with your mercurial '
+ 'installation.')
+ # Check for the presence of the "push-to-try" extension, and
+ # provide instructions if it can't be found.
+ try:
+ subprocess.check_output(['hg', 'showconfig',
+ 'extensions.push-to-try'])
+ except subprocess.CalledProcessError:
+ print('\nThe "push-to-try" hg extension is required. It '
+ 'can be installed to Mercurial 3.3 or above by '
+ 'running ./mach mercurial-setup')
+ sys.exit(1)
+ else:
+ try:
+ which.which('git-cinnabar')
+ self._git_push_to_try(msg)
+ except which.WhichError:
+                print('ERROR git-cinnabar is required to push from git to try with '
+ 'the autotry command.\n\nMore information can by found at '
+ 'https://github.com/glandium/git-cinnabar')
+ sys.exit(1)
+
+ def find_uncommited_changes(self):
+ if self._use_git:
+ stat = subprocess.check_output(['git', 'status', '-z'])
+ return any(len(entry.strip()) and entry.strip()[0] in ('A', 'M', 'D')
+ for entry in stat.split('\0'))
+ else:
+ stat = subprocess.check_output(['hg', 'status'])
+ return any(len(entry.strip()) and entry.strip()[0] in ('A', 'M', 'R')
+ for entry in stat.splitlines())
+
+ def find_paths_and_tags(self, verbose):
+ paths, tags = set(), set()
+ changed_files = self.find_changed_files()
+ if changed_files:
+ if verbose:
+ print("Pushing tests based on modifications to the "
+ "following files:\n\t%s" % "\n\t".join(changed_files))
+
+ from mozbuild.frontend.reader import (
+ BuildReader,
+ EmptyConfig,
+ )
+
+ config = EmptyConfig(self.topsrcdir)
+ reader = BuildReader(config)
+ files_info = reader.files_info(changed_files)
+
+ for path, info in files_info.items():
+ paths |= info.test_files
+ tags |= info.test_tags
+
+ if verbose:
+ if paths:
+ print("Pushing tests based on the following patterns:\n\t%s" %
+ "\n\t".join(paths))
+ if tags:
+ print("Pushing tests based on the following tags:\n\t%s" %
+ "\n\t".join(tags))
+ return paths, tags
diff --git a/testing/tools/fileid/linux_fileid.cpp b/testing/tools/fileid/linux_fileid.cpp
new file mode 100644
index 000000000..de1ecbd1c
--- /dev/null
+++ b/testing/tools/fileid/linux_fileid.cpp
@@ -0,0 +1,41 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdio.h>
+#include <string>
+
+#include "common/linux/file_id.h"
+#include "common/memory.h"
+
+using std::string;
+
+using google_breakpad::auto_wasteful_vector;
+using google_breakpad::FileID;
+using google_breakpad::PageAllocator;
+
+int main(int argc, char** argv)
+{
+
+ if (argc != 2) {
+ fprintf(stderr, "usage: fileid <elf file>\n");
+ return 1;
+ }
+
+ PageAllocator allocator;
+ auto_wasteful_vector<uint8_t, sizeof(MDGUID)> identifier(&allocator);
+ FileID file_id(argv[1]);
+ if (!file_id.ElfFileIdentifier(identifier)) {
+ fprintf(stderr, "%s: unable to generate file identifier\n",
+ argv[1]);
+ return 1;
+ }
+
+ string result_guid = FileID::ConvertIdentifierToUUIDString(identifier);
+
+ // Add an extra "0" at the end. PDB files on Windows have an 'age'
+ // number appended to the end of the file identifier; this isn't
+ // really used or necessary on other platforms, but be consistent.
+ printf("%s0\n", result_guid.c_str());
+ return 0;
+}
diff --git a/testing/tools/fileid/mac_fileid.cpp b/testing/tools/fileid/mac_fileid.cpp
new file mode 100644
index 000000000..114a690a9
--- /dev/null
+++ b/testing/tools/fileid/mac_fileid.cpp
@@ -0,0 +1,52 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdio.h>
+#include <string>
+
+#include "common/mac/arch_utilities.h"
+#include "common/mac/file_id.h"
+
+//TODO: move this somewhere common, this is copied from dump_symbols.cc
+// Format the Mach-O identifier in IDENTIFIER as a UUID with the
+// dashes removed.
+std::string FormatIdentifier(unsigned char identifier[16])
+{
+ char identifier_string[40];
+ google_breakpad::FileID::ConvertIdentifierToString(identifier, identifier_string,
+ sizeof(identifier_string));
+ std::string compacted(identifier_string);
+ for(size_t i = compacted.find('-'); i != std::string::npos;
+ i = compacted.find('-', i))
+ compacted.erase(i, 1);
+ compacted += '0';
+ return compacted;
+}
+
+int main(int argc, char** argv)
+{
+ if (argc != 2) {
+ fprintf(stderr, "usage: fileid <object file>\n");
+ return 1;
+ }
+
+
+ unsigned char identifier[16];
+ google_breakpad::FileID file_id(argv[1]);
+
+ // We should be able to use NXGetLocalArchInfo for this, but it returns
+ // CPU_TYPE_X86 (which is the same as CPU_TYPE_I386) on x86_64 machines,
+ // when our binary will typically have CPU_TYPE_X86_64 to match against.
+ // So we hard code x86_64. In practice that's where we're running tests,
+ // and that's what our debug binaries will contain.
+ if (!file_id.MachoIdentifier(CPU_TYPE_X86_64, CPU_SUBTYPE_MULTIPLE,
+ identifier)) {
+ fprintf(stderr, "%s: unable to generate file identifier\n",
+ argv[1]);
+ return 1;
+ }
+
+ printf("%s\n", FormatIdentifier(identifier).c_str());
+ return 0;
+}
diff --git a/testing/tools/fileid/moz.build b/testing/tools/fileid/moz.build
new file mode 100644
index 000000000..b7276fc77
--- /dev/null
+++ b/testing/tools/fileid/moz.build
@@ -0,0 +1,34 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+GeckoProgram('fileid', linkage=None, msvcrt='static')
+
+if CONFIG['OS_ARCH'] == 'Linux':
+ USE_LIBS += [
+ 'breakpad_linux_common_s',
+ ]
+ SOURCES += ['linux_fileid.cpp']
+
+
+if CONFIG['OS_ARCH'] == 'Darwin':
+ USE_LIBS += [
+ 'breakpad_mac_common_s',
+ ]
+ SOURCES += ['mac_fileid.cpp']
+
+
+if CONFIG['OS_ARCH'] == 'Linux' or CONFIG['OS_ARCH'] == 'Darwin':
+ USE_LIBS += [
+ 'breakpad_common_s',
+ ]
+ LOCAL_INCLUDES += [
+ '/toolkit/crashreporter/google-breakpad/src',
+ ]
+
+if CONFIG['OS_ARCH'] == 'WINNT':
+ SOURCES += ['win_fileid.cpp']
+ OS_LIBS += ['dbghelp']
+ NO_PGO = True
diff --git a/testing/tools/fileid/win_fileid.cpp b/testing/tools/fileid/win_fileid.cpp
new file mode 100644
index 000000000..263229882
--- /dev/null
+++ b/testing/tools/fileid/win_fileid.cpp
@@ -0,0 +1,90 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <windows.h>
+#include <dbghelp.h>
+
+const DWORD CV_SIGNATURE_RSDS = 0x53445352; // 'SDSR'
+
+struct CV_INFO_PDB70 {
+ DWORD CvSignature;
+ GUID Signature;
+ DWORD Age;
+ BYTE PdbFileName[1];
+};
+
+void print_guid(const GUID& guid, DWORD age)
+{
+ printf("%08X%04X%04X%02X%02X%02X%02X%02X%02X%02X%02X%X",
+ guid.Data1, guid.Data2, guid.Data3,
+ guid.Data4[0], guid.Data4[1], guid.Data4[2], guid.Data4[3],
+ guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7],
+ age);
+}
+
+int main(int argc, char** argv)
+{
+ if (argc != 2) {
+ fprintf(stderr, "usage: fileid <file>\n");
+ return 1;
+ }
+
+ HANDLE file = CreateFileA(argv[1],
+ GENERIC_READ,
+ FILE_SHARE_READ,
+ nullptr,
+ OPEN_EXISTING,
+ FILE_ATTRIBUTE_NORMAL,
+ nullptr);
+ if (file == INVALID_HANDLE_VALUE) {
+ fprintf(stderr, "Couldn't open file: %s\n", argv[1]);
+ return 1;
+ }
+
+ HANDLE mapFile = CreateFileMappingA(file, NULL, PAGE_READONLY, 0, 0, 0);
+ if (mapFile == nullptr) {
+ fprintf(stderr, "Couldn't create file mapping\n");
+ CloseHandle(file);
+ return 1;
+ }
+
+ uint8_t* base = reinterpret_cast<uint8_t*>(MapViewOfFile(mapFile,
+ FILE_MAP_READ,
+ 0,
+ 0,
+ 0));
+ if (base == nullptr) {
+ fprintf(stderr, "Couldn't map file\n");
+ CloseHandle(mapFile);
+ CloseHandle(file);
+ return 1;
+ }
+
+ DWORD size;
+ PIMAGE_DEBUG_DIRECTORY debug_dir =
+ reinterpret_cast<PIMAGE_DEBUG_DIRECTORY>(
+ ImageDirectoryEntryToDataEx(base,
+ FALSE,
+ IMAGE_DIRECTORY_ENTRY_DEBUG,
+ &size,
+ nullptr));
+
+ bool found = false;
+ if (debug_dir->Type == IMAGE_DEBUG_TYPE_CODEVIEW) {
+ CV_INFO_PDB70* cv =
+ reinterpret_cast<CV_INFO_PDB70*>(base + debug_dir->PointerToRawData);
+ if (cv->CvSignature == CV_SIGNATURE_RSDS) {
+ found = true;
+ print_guid(cv->Signature, cv->Age);
+ }
+ }
+
+ UnmapViewOfFile(base);
+ CloseHandle(mapFile);
+ CloseHandle(file);
+
+ return found ? 0 : 1;
+}
diff --git a/testing/tools/grabber/getpages.sh b/testing/tools/grabber/getpages.sh
new file mode 100755
index 000000000..e2c625b3e
--- /dev/null
+++ b/testing/tools/grabber/getpages.sh
@@ -0,0 +1,54 @@
+#! /bin/bash
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+#original author: Alice Nodelman
+#contributor: Darin Fisher
+#
+#takes two inputs, $1 = file containing list of web pages of form http://pagename
+# $2 = output file where list of index files is dumped - useful for placing lists of links into scripts
+#
+# web pages are dropped in directories named for their urls
+
+if [ $# != 2 ]; then
+ echo 'missing command line arguments'
+ echo
+ echo 'usage: getpages.sh inputfile outputfile'
+ echo ' inputfile: file containing one url per line of the form http://url'
+ echo ' outputfile: file to be created during execution, contains a list of index files one per url'
+ exit
+fi
+
+# generates the list of files to be cleansed (exclude image files)
+# disables any existing call-outs to the live web
+# provided by Darin Fisher
+cleanse_files() {
+ find "$1" -type f -a -print0 ! -iname \*.jpg -a ! -iname \*.gif -a ! -iname \*.png -a ! -name \*.bak | xargs -0 perl -pi -e 's/[a-zA-Z0-9_]*.writeln/void/g;' -e 's/[a-zA-Z0-9_]*.write/void/g;' -e 's/[a-zA-Z0-9_]*.open/void/g;' -e 's/"https/"httpsdisabled/gi;' -e 's/"http/"httpdisabled/gi;' -e 's/<object/<objectdisabled/gi;' -e 's/<embed/<embeddisabled/gi;' -e 's/load/loaddisabled/g;'
+}
+
+mkdir testpages
+cd testpages
+for URL in $(cat ../$1); do
+ #strip the leading http:// from the url
+ CLEANURL=$(echo $URL | sed -e 's/http:\/\/\(.*\)/\1/')
+ #create a directory with the cleaned url as the name
+ echo "grabbing "$CLEANURL
+ mkdir $CLEANURL
+ cd $CLEANURL
+ ../../wget-1.10-css-parser/src/wget -p -k -H -E -erobots=off --no-check-certificate -U "Mozilla/5.0 (firefox)" --restrict-file-names=windows $URL -o outputlog.txt
+ #figure out where/what the index file for this page is from the wget output log
+ FILENAME=$(grep "saved" outputlog.txt | head -1 | sed -e "s/.*\`\(.*\)\'.*/\1/")
+ rm outputlog.txt
+ cd ..
+
+ #do the final cleanup of any dangling urls
+ #with thanks to Darin Fisher for the code
+ cleanse_files $CLEANURL
+
+ #add the index file link to the list of index files being generated
+ echo $CLEANURL/$FILENAME >> $2
+done
+cd ..
+
diff --git a/testing/tools/iceserver/iceserver.py b/testing/tools/iceserver/iceserver.py
new file mode 100644
index 000000000..3e1d31de9
--- /dev/null
+++ b/testing/tools/iceserver/iceserver.py
@@ -0,0 +1,759 @@
+# vim: set ts=4 et sw=4 tw=80
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import ipaddr
+import socket
+import hmac
+import hashlib
+import passlib.utils # for saslprep
+import copy
+import random
+import operator
+import platform
+import time
+from string import Template
+from twisted.internet import reactor, protocol
+from twisted.internet.task import LoopingCall
+from twisted.internet.address import IPv4Address
+
# Fixed STUN magic cookie value (RFC 5389 message header).
MAGIC_COOKIE = 0x2112A442

# STUN message classes.
REQUEST = 0
INDICATION = 1
SUCCESS_RESPONSE = 2
ERROR_RESPONSE = 3

# STUN/TURN method numbers.
BINDING = 0x001
ALLOCATE = 0x003
REFRESH = 0x004
SEND = 0x006
DATA_MSG = 0x007
CREATE_PERMISSION = 0x008
CHANNEL_BIND = 0x009

# Address family codes used inside (XOR-)address attributes.
IPV4 = 1
IPV6 = 2

# Attribute type codes (RFC 5389 / RFC 5766).
MAPPED_ADDRESS = 0x0001
USERNAME = 0x0006
MESSAGE_INTEGRITY = 0x0008
ERROR_CODE = 0x0009
UNKNOWN_ATTRIBUTES = 0x000A
LIFETIME = 0x000D
DATA_ATTR = 0x0013
XOR_PEER_ADDRESS = 0x0012
REALM = 0x0014
NONCE = 0x0015
XOR_RELAYED_ADDRESS = 0x0016
REQUESTED_TRANSPORT = 0x0019
DONT_FRAGMENT = 0x001A
XOR_MAPPED_ADDRESS = 0x0020
SOFTWARE = 0x8022
ALTERNATE_SERVER = 0x8023
FINGERPRINT = 0x8028
+
def unpack_uint(bytes_buf):
    """Interpret *bytes_buf* as a big-endian unsigned integer."""
    total = 0
    for octet in bytes_buf:
        total = (total << 8) | octet
    return total

def pack_uint(value, width):
    """Encode *value* big-endian into *width* bytes.

    Raises ValueError for negative values; bits that do not fit in *width*
    bytes are silently dropped (matching prior behavior).
    """
    if value < 0:
        raise ValueError("Invalid value: {}".format(value))
    out = bytearray(width)
    for i in range(width):
        out[i] = (value >> (8 * (width - i - 1))) & 0xFF
    return out

def unpack(bytes_buf, format_array):
    """Split the head of *bytes_buf* into uints with the given byte widths."""
    fields = []
    offset = 0
    for width in format_array:
        fields.append(unpack_uint(bytes_buf[offset:offset + width]))
        offset += width
    return tuple(fields)

def pack(values, format_array):
    """Concatenate each value encoded at its matching width; lengths must agree."""
    if len(values) != len(format_array):
        raise ValueError()
    out = bytearray()
    for value, width in zip(values, format_array):
        out.extend(pack_uint(value, width))
    return out

def bitwise_pack(source, dest, start_bit, num_bits):
    """Append *num_bits* of *source* (the run ending at *start_bit*, with
    bit 0 the LSB) to the low end of *dest* and return the result."""
    if num_bits <= 0 or num_bits > start_bit + 1:
        raise ValueError("Invalid num_bits: {}, start_bit = {}"
                         .format(num_bits, start_bit))
    low_bit = start_bit - num_bits + 1
    mask = (1 << num_bits) - 1
    return (dest << num_bits) | ((source >> low_bit) & mask)
+
+
class StunAttribute(object):
    """
    Represents a STUN attribute in a raw format, according to the following:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |      StunAttribute.attr_type  |  Length (derived as needed)   |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |            StunAttribute.data (variable length)            ....
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    """

    # attr_type (2 bytes) followed by payload length (2 bytes)
    __attr_header_fmt = [2, 2]
    # sum() produces the same value as reduce(operator.add, ...) and, unlike
    # reduce, is also a builtin on Python 3.
    __attr_header_size = sum(__attr_header_fmt)

    def __init__(self, attr_type=0, buf=None):
        # buf defaults to None rather than a mutable bytearray() default,
        # which would be a single object shared by every instance built
        # without an explicit buffer.
        self.attr_type = attr_type
        self.data = buf if buf is not None else bytearray()

    def build(self):
        """Serialize header + data, zero-padded to a 4-byte boundary."""
        buf = pack((self.attr_type, len(self.data)), self.__attr_header_fmt)
        buf.extend(self.data)
        # add padding if necessary
        if len(buf) % 4:
            buf.extend([0] * (4 - (len(buf) % 4)))
        return buf

    def parse(self, buf):
        """Parse one attribute from the front of *buf*.

        Returns the number of bytes consumed, including padding. Raises if
        the buffer is truncated or the padding bytes are non-zero.
        """
        if self.__attr_header_size > len(buf):
            raise Exception('truncated at attribute: incomplete header')

        self.attr_type, length = unpack(buf, self.__attr_header_fmt)
        length += self.__attr_header_size

        if length > len(buf):
            raise Exception('truncated at attribute: incomplete contents')

        self.data = buf[self.__attr_header_size:length]

        # verify (and step over) the zero padding up to a 4-byte boundary
        while length % 4:
            if buf[length]:
                raise ValueError("Non-zero padding")
            length += 1

        return length
+
+
class StunMessage(object):
    """
    Represents a STUN message. Contains a method, msg_class, cookie,
    transaction_id, and attributes (as an array of StunAttribute).

    Has various functions for getting/adding attributes.
    """

    def __init__(self):
        self.method = 0
        self.msg_class = 0
        self.cookie = MAGIC_COOKIE
        self.transaction_id = 0
        self.attributes = []

#  0                   1                   2                   3
#  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |0 0|M M M M M|C|M M M|C|M M M M|         Message Length        |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |                         Magic Cookie                          |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |                                                               |
# |                     Transaction ID (96 bits)                  |
# |                                                               |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    __header_fmt = [2, 2, 4, 12]
    # sum() equals reduce(operator.add, ...) here and also works on
    # Python 3, where reduce is no longer a builtin.
    __header_size = sum(__header_fmt)

    # Returns how many bytes were parsed if buf was large enough, or how many
    # bytes we would have needed if not. Throws if buf is malformed.
    def parse(self, buf):
        min_buf_size = self.__header_size
        if len(buf) < min_buf_size:
            return min_buf_size

        message_type, length, cookie, self.transaction_id = unpack(
            buf, self.__header_fmt)
        min_buf_size += length
        if len(buf) < min_buf_size:
            return min_buf_size

        # Avert your eyes... (method and class bits are interleaved across
        # the 14-bit message type; see the diagram above)
        self.method = bitwise_pack(message_type, 0, 13, 5)
        self.msg_class = bitwise_pack(message_type, 0, 8, 1)
        self.method = bitwise_pack(message_type, self.method, 7, 3)
        self.msg_class = bitwise_pack(message_type, self.msg_class, 4, 1)
        self.method = bitwise_pack(message_type, self.method, 3, 4)

        if cookie != self.cookie:
            raise Exception('Invalid cookie: {}'.format(cookie))

        buf = buf[self.__header_size:min_buf_size]
        while len(buf):
            attr = StunAttribute()
            length = attr.parse(buf)
            buf = buf[length:]
            self.attributes.append(attr)

        return min_buf_size

    # stop_after_attr_type is useful for calculating MESSAGE-DIGEST
    def build(self, stop_after_attr_type=0):
        attrs = bytearray()
        for attr in self.attributes:
            attrs.extend(attr.build())
            if attr.attr_type == stop_after_attr_type:
                break

        message_type = bitwise_pack(self.method, 0, 11, 5)
        message_type = bitwise_pack(self.msg_class, message_type, 1, 1)
        message_type = bitwise_pack(self.method, message_type, 6, 3)
        message_type = bitwise_pack(self.msg_class, message_type, 0, 1)
        message_type = bitwise_pack(self.method, message_type, 3, 4)

        message = pack((message_type,
                        len(attrs),
                        self.cookie,
                        self.transaction_id), self.__header_fmt)
        message.extend(attrs)

        return message

    def add_error_code(self, code, phrase=None):
#  0                   1                   2                   3
#  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |           Reserved, should be 0         |Class|     Number    |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |      Reason Phrase (variable)                                ..
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
        error_code_fmt = [3, 1]
        error_code = pack((code // 100, code % 100), error_code_fmt)
        if phrase is not None:
            error_code.extend(bytearray(phrase))
        self.attributes.append(StunAttribute(ERROR_CODE, error_code))

#  0                   1                   2                   3
#  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |x x x x x x x x|    Family     |         X-Port                |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |                X-Address (Variable)
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    __xor_v4addr_fmt = [1, 1, 2, 4]
    __xor_v6addr_fmt = [1, 1, 2, 16]
    __xor_v4addr_size = sum(__xor_v4addr_fmt)
    __xor_v6addr_size = sum(__xor_v6addr_fmt)

    def get_xaddr(self, ip_addr, version):
        # XOR-encode/decode an integer address; the operation is its own
        # inverse, so the same function serves both directions.
        if version == IPV4:
            return self.cookie ^ ip_addr
        elif version == IPV6:
            return ((self.cookie << 96) + self.transaction_id) ^ ip_addr
        else:
            # BUG FIX: this previously referenced the unbound name "family",
            # raising NameError instead of the intended ValueError.
            raise ValueError("Invalid family: {}".format(version))

    def get_xport(self, port):
        return (self.cookie >> 16) ^ port

    def add_xor_address(self, addr_port, attr_type):
        """Append an XOR-encoded address attribute for a twisted address."""
        ip_address = ipaddr.IPAddress(addr_port.host)
        xport = self.get_xport(addr_port.port)

        if ip_address.version == 4:
            xaddr = self.get_xaddr(int(ip_address), IPV4)
            xor_address = pack((0, IPV4, xport, xaddr), self.__xor_v4addr_fmt)
        elif ip_address.version == 6:
            xaddr = self.get_xaddr(int(ip_address), IPV6)
            xor_address = pack((0, IPV6, xport, xaddr), self.__xor_v6addr_fmt)
        else:
            raise ValueError("Invalid ip version: {}"
                             .format(ip_address.version))

        self.attributes.append(StunAttribute(attr_type, xor_address))

    def add_data(self, buf):
        self.attributes.append(StunAttribute(DATA_ATTR, buf))

    def find(self, attr_type):
        """Return the first attribute of the given type, or None."""
        for attr in self.attributes:
            if attr.attr_type == attr_type:
                return attr
        return None

    def get_xor_address(self, attr_type):
        """Decode an XOR-address attribute into a twisted address, or None."""
        addr_attr = self.find(attr_type)
        if not addr_attr:
            return None

        padding, family, xport, xaddr = unpack(addr_attr.data,
                                               self.__xor_v4addr_fmt)
        addr_ctor = IPv4Address
        if family == IPV6:
            from twisted.internet.address import IPv6Address
            padding, family, xport, xaddr = unpack(addr_attr.data,
                                                   self.__xor_v6addr_fmt)
            addr_ctor = IPv6Address
        elif family != IPV4:
            raise ValueError("Invalid family: {}".format(family))

        return addr_ctor('UDP',
                         str(ipaddr.IPAddress(self.get_xaddr(xaddr, family))),
                         self.get_xport(xport))

    def add_nonce(self, nonce):
        self.attributes.append(StunAttribute(NONCE, bytearray(nonce)))

    def add_realm(self, realm):
        self.attributes.append(StunAttribute(REALM, bytearray(realm)))

    def calculate_message_digest(self, username, realm, password):
        """HMAC-SHA1 over the message up to (but excluding) MESSAGE-INTEGRITY,
        keyed with MD5(user:realm:saslprep(password)) per RFC 5389."""
        digest_buf = self.build(MESSAGE_INTEGRITY)
        # Trim off the MESSAGE-INTEGRITY attr (4-byte header + 20-byte HMAC)
        digest_buf = digest_buf[:len(digest_buf) - 24]
        # NOTE: unicode() is Python 2-only; this file targets py2 throughout.
        password = passlib.utils.saslprep(unicode(password))
        key_string = "{}:{}:{}".format(username, realm, password)
        md5 = hashlib.md5()
        md5.update(key_string)
        key = md5.digest()
        return bytearray(hmac.new(key, digest_buf, hashlib.sha1).digest())

    def add_lifetime(self, lifetime):
        self.attributes.append(StunAttribute(LIFETIME, pack_uint(lifetime, 4)))

    def get_lifetime(self):
        lifetime_attr = self.find(LIFETIME)
        if not lifetime_attr:
            return None
        return unpack_uint(lifetime_attr.data[0:4])

    def get_username(self):
        username = self.find(USERNAME)
        if not username:
            return None
        return str(username.data)

    def add_message_integrity(self, username, realm, password):
        # Append a placeholder first so the digest covers the attribute's
        # header, then overwrite the placeholder with the real HMAC.
        dummy_value = bytearray([0] * 20)
        self.attributes.append(StunAttribute(MESSAGE_INTEGRITY, dummy_value))
        digest = self.calculate_message_digest(username, realm, password)
        self.find(MESSAGE_INTEGRITY).data = digest
+
+
class Allocation(protocol.DatagramProtocol):
    """
    Comprises the socket for a TURN allocation, a back-reference to the
    transport we will forward received traffic on, the allocator's address and
    username, the set of permissions for the allocation, and the allocation's
    expiry.
    """

    def __init__(self, other_transport_handler, allocator_address, username):
        self.permissions = set()  # host strings allowed to send to us
        # Handler to use when sending stuff that arrives on the allocation
        self.other_transport_handler = other_transport_handler
        self.allocator_address = allocator_address
        self.username = username
        # Starts already expired; handle_allocation pushes it into the future.
        self.expiry = time.time()
        # v4_address is the module-level default-route address.
        self.port = reactor.listenUDP(0, self, interface=v4_address)

    def datagramReceived(self, data, addr):
        # BUG FIX (portability): the tuple-parameter signature
        # "def datagramReceived(self, data, (host, port))" is Python 2-only
        # syntax; twisted passes a (host, port) tuple, so unpack explicitly.
        host, port = addr
        if host not in self.permissions:
            print("Dropping packet from {}:{}, no permission on allocation {}"
                  .format(host, port, self.transport.getHost()))
            return

        data_indication = StunMessage()
        data_indication.method = DATA_MSG
        data_indication.msg_class = INDICATION
        data_indication.transaction_id = random.getrandbits(96)

        # Only handles UDP allocations. Doubtful that we need more than this.
        data_indication.add_xor_address(IPv4Address('UDP', host, port),
                                        XOR_PEER_ADDRESS)
        data_indication.add_data(data)

        self.other_transport_handler.write(data_indication.build(),
                                           self.allocator_address)

    def close(self):
        """Stop listening and drop the port reference."""
        self.port.stopListening()
        self.port = None
+
+
class StunHandler(object):
    """
    Frames and handles STUN messages. This is the core logic of the TURN
    server, along with Allocation.
    """

    def __init__(self, transport_handler):
        self.client_address = None
        # Accumulates raw bytes across calls so a STUN message split over
        # several TCP segments can be reassembled.
        # NOTE(review): starts as str and grows via "+= bytearray(...)" in
        # data_received; on Python 2 str+bytearray yields a bytearray, which
        # parse() requires -- revisit if this is ever ported to Python 3.
        self.data = str()
        self.transport_handler = transport_handler

    def data_received(self, data, address):
        """Buffer *data* and handle every complete STUN message inside it."""
        self.data += bytearray(data)
        while True:
            stun_message = StunMessage()
            parsed_len = stun_message.parse(self.data)
            # parse() returns more than we have when the message is
            # incomplete; wait for more bytes in that case.
            if parsed_len > len(self.data):
                break
            self.data = self.data[parsed_len:]

            response = self.handle_stun(stun_message, address)
            if response:
                self.transport_handler.write(response, address)

    def handle_stun(self, stun_message, address):
        """Dispatch one parsed message; returns response bytes or None."""
        self.client_address = address
        if stun_message.msg_class == INDICATION:
            if stun_message.method == SEND:
                self.handle_send_indication(stun_message)
            else:
                print("Dropping unknown indication method: {}"
                      .format(stun_message.method))
            return None

        if stun_message.msg_class != REQUEST:
            print("Dropping STUN response, method: {}"
                  .format(stun_message.method))
            return None

        if stun_message.method == BINDING:
            return self.make_success_response(stun_message).build()
        elif stun_message.method == ALLOCATE:
            return self.handle_allocation(stun_message).build()
        elif stun_message.method == REFRESH:
            return self.handle_refresh(stun_message).build()
        elif stun_message.method == CREATE_PERMISSION:
            return self.handle_permission(stun_message).build()
        else:
            return self.make_error_response(
                stun_message,
                400,
                ("Unsupported STUN request, method: {}"
                 .format(stun_message.method))).build()

    def get_allocation_tuple(self):
        """5-tuple key identifying this client/server pair in |allocations|."""
        return (self.client_address.host,
                self.client_address.port,
                self.transport_handler.transport.getHost().type,
                self.transport_handler.transport.getHost().host,
                self.transport_handler.transport.getHost().port)

    def handle_allocation(self, request):
        """Create a new relay allocation (TURN ALLOCATE) after auth."""
        allocate_response = self.check_long_term_auth(request)
        if allocate_response.msg_class == SUCCESS_RESPONSE:
            if self.get_allocation_tuple() in allocations:
                return self.make_error_response(
                    request,
                    437,
                    ("Duplicate allocation request for tuple {}"
                     .format(self.get_allocation_tuple())))

            allocation = Allocation(self.transport_handler,
                                    self.client_address,
                                    request.get_username())

            allocate_response.add_xor_address(allocation.transport.getHost(),
                                              XOR_RELAYED_ADDRESS)

            lifetime = request.get_lifetime()
            if lifetime is None:
                return self.make_error_response(
                    request,
                    400,
                    "Missing lifetime attribute in allocation request")

            # Cap the client-requested lifetime at one hour.
            lifetime = min(lifetime, 3600)
            allocate_response.add_lifetime(lifetime)
            allocation.expiry = time.time() + lifetime

            allocate_response.add_message_integrity(turn_user,
                                                    turn_realm,
                                                    turn_pass)
            allocations[self.get_allocation_tuple()] = allocation
        return allocate_response

    def handle_refresh(self, request):
        """Extend (or let lapse) an existing allocation (TURN REFRESH)."""
        refresh_response = self.check_long_term_auth(request)
        if refresh_response.msg_class == SUCCESS_RESPONSE:
            try:
                allocation = allocations[self.get_allocation_tuple()]
            except KeyError:
                return self.make_error_response(
                    request,
                    437,
                    ("Refresh request for non-existing allocation, tuple {}"
                     .format(self.get_allocation_tuple())))

            if allocation.username != request.get_username():
                return self.make_error_response(
                    request,
                    441,
                    ("Refresh request with wrong user, exp {}, got {}"
                     .format(allocation.username, request.get_username())))

            lifetime = request.get_lifetime()
            if lifetime is None:
                return self.make_error_response(
                    request,
                    400,
                    "Missing lifetime attribute in allocation request")

            lifetime = min(lifetime, 3600)
            refresh_response.add_lifetime(lifetime)
            allocation.expiry = time.time() + lifetime

            refresh_response.add_message_integrity(turn_user,
                                                   turn_realm,
                                                   turn_pass)
        return refresh_response

    def handle_permission(self, request):
        """Record permission for a peer host (TURN CREATE-PERMISSION)."""
        permission_response = self.check_long_term_auth(request)
        if permission_response.msg_class == SUCCESS_RESPONSE:
            try:
                allocation = allocations[self.get_allocation_tuple()]
            except KeyError:
                return self.make_error_response(
                    request,
                    437,
                    ("No such allocation for permission request, tuple {}"
                     .format(self.get_allocation_tuple())))

            if allocation.username != request.get_username():
                return self.make_error_response(
                    request,
                    441,
                    ("Permission request with wrong user, exp {}, got {}"
                     .format(allocation.username, request.get_username())))

            # TODO: Handle multiple XOR-PEER-ADDRESS
            peer_address = request.get_xor_address(XOR_PEER_ADDRESS)
            if not peer_address:
                return self.make_error_response(
                    request,
                    400,
                    "Missing XOR-PEER-ADDRESS on permission request")

            permission_response.add_message_integrity(turn_user,
                                                      turn_realm,
                                                      turn_pass)
            allocation.permissions.add(peer_address.host)

        return permission_response

    def handle_send_indication(self, indication):
        """Relay a SEND indication's DATA payload to the permitted peer."""
        try:
            allocation = allocations[self.get_allocation_tuple()]
        except KeyError:
            print("Dropping send indication; no allocation for tuple {}"
                  .format(self.get_allocation_tuple()))
            return

        peer_address = indication.get_xor_address(XOR_PEER_ADDRESS)
        if not peer_address:
            print("Dropping send indication, missing XOR-PEER-ADDRESS")
            return

        data_attr = indication.find(DATA_ATTR)
        if not data_attr:
            print("Dropping send indication, missing DATA")
            return

        if indication.find(DONT_FRAGMENT):
            print("Dropping send indication, DONT-FRAGMENT set")
            return

        if peer_address.host not in allocation.permissions:
            print("Dropping send indication, no permission for {} on tuple {}"
                  .format(peer_address.host, self.get_allocation_tuple()))
            return

        allocation.transport.write(data_attr.data,
                                   (peer_address.host, peer_address.port))

    def make_success_response(self, request):
        """Success response echoing method/transaction, with XOR-MAPPED-ADDRESS."""
        response = copy.deepcopy(request)
        response.attributes = []
        response.add_xor_address(self.client_address, XOR_MAPPED_ADDRESS)
        response.msg_class = SUCCESS_RESPONSE
        return response

    def make_error_response(self, request, code, reason=None):
        if reason:
            print("{}: rejecting with {}".format(reason, code))
        response = copy.deepcopy(request)
        response.attributes = []
        response.add_error_code(code, reason)
        response.msg_class = ERROR_RESPONSE
        return response

    def make_challenge_response(self, request, reason=None):
        """401 challenge carrying a fresh NONCE and our REALM."""
        response = self.make_error_response(request, 401, reason)
        # 65 means the hex encoding will need padding half the time
        response.add_nonce("{:x}".format(random.getrandbits(65)))
        response.add_realm(turn_realm)
        return response

    def check_long_term_auth(self, request):
        """RFC 5389 long-term credential check; returns either a success
        response template or a challenge/error response."""
        message_integrity = request.find(MESSAGE_INTEGRITY)
        if not message_integrity:
            return self.make_challenge_response(request)

        username = request.find(USERNAME)
        realm = request.find(REALM)
        nonce = request.find(NONCE)
        if not username or not realm or not nonce:
            return self.make_error_response(
                request,
                400,
                "Missing either USERNAME, NONCE, or REALM")

        if str(username.data) != turn_user:
            return self.make_challenge_response(
                request,
                "Wrong user {}, exp {}".format(username.data, turn_user))

        expected_message_digest = request.calculate_message_digest(turn_user,
                                                                   turn_realm,
                                                                   turn_pass)
        if message_integrity.data != expected_message_digest:
            # (fixed typo: was "disgest")
            return self.make_challenge_response(request,
                                                "Incorrect message digest")

        return self.make_success_response(request)
+
+
class UdpStunHandler(protocol.DatagramProtocol):
    """
    Represents a UDP listen port for TURN.
    """

    def datagramReceived(self, data, address):
        """Handle one datagram with a throwaway StunHandler; UDP needs no
        cross-packet framing state, so a fresh handler per packet is fine."""
        host, port = address[0], address[1]
        StunHandler(self).data_received(data, IPv4Address('UDP', host, port))

    def write(self, data, address):
        """Send bytes to an IPv4Address-style destination."""
        self.transport.write(str(data), (address.host, address.port))
+
+
class TcpStunHandlerFactory(protocol.Factory):
    """
    Represents a TCP listen port for TURN.
    """

    def buildProtocol(self, addr):
        # One TcpStunHandler per accepted connection; it remembers the peer
        # address for allocation/permission bookkeeping.
        return TcpStunHandler(addr)
+
+
class TcpStunHandler(protocol.Protocol):
    """
    Represents a connected TCP port for TURN.
    """

    def __init__(self, addr):
        self.address = addr
        # Created lazily on first dataReceived; kept for the life of the
        # connection because it carries STUN framing state.
        self.stun_handler = None

    def dataReceived(self, data):
        # This needs to persist, since it handles framing
        if not self.stun_handler:
            self.stun_handler = StunHandler(self)
        self.stun_handler.data_received(data, self.address)

    def connectionLost(self, reason):
        print("Lost connection from {}".format(self.address))
        # Destroy allocations that this connection made. Iterate over a
        # snapshot because entries are deleted mid-loop: Python 2's .items()
        # already copies, but mutating the dict during iteration would raise
        # RuntimeError under Python 3, so take an explicit list().
        for key, allocation in list(allocations.items()):
            if allocation.other_transport_handler == self:
                print("Closing allocation due to dropped connection: {}"
                      .format(key))
                del allocations[key]
                allocation.close()

    def write(self, data, address):
        # The address argument is ignored (TCP is connected); it exists for
        # interface parity with UdpStunHandler.write.
        self.transport.write(str(data))
+
def get_default_route(family):
    """Return the local source address the kernel picks for *family*'s
    default route.

    connect() on a SOCK_DGRAM socket sends no packets; it only binds the
    route, so getsockname() reveals the outgoing interface address.
    Raises socket.error when there is no route (callers rely on this for
    IPv6 detection).
    """
    dummy_socket = socket.socket(family, socket.SOCK_DGRAM)
    try:
        if family is socket.AF_INET:
            dummy_socket.connect(("8.8.8.8", 53))
        else:
            dummy_socket.connect(("2001:4860:4860::8888", 53))
        return dummy_socket.getsockname()[0]
    finally:
        # BUG FIX: previously the socket leaked when connect() raised
        # (e.g. no IPv6 route); always close it.
        dummy_socket.close()
+
# Shared TURN credentials/realm, used both for request authentication and in
# the ICE-server JSON printed at startup.
turn_user = "foo"
turn_pass = "bar"
turn_realm = "mozilla.invalid"
# Live allocations keyed by the 5-tuple from StunHandler.get_allocation_tuple().
allocations = {}
v4_address = get_default_route(socket.AF_INET)
try:
    v6_address = get_default_route(socket.AF_INET6)
except Exception:
    # No IPv6 default route; leave v6_address empty and serve IPv4 only.
    # (was a bare "except:", which would also have swallowed SystemExit and
    # KeyboardInterrupt)
    v6_address = ""
+
def prune_allocations():
    """Close and drop every allocation whose expiry has passed."""
    now = time.time()
    # Iterate a snapshot since entries are deleted mid-loop; .items() copies
    # on Python 2 already, but list() also keeps this safe under Python 3.
    for key, allocation in list(allocations.items()):
        if allocation.expiry < now:
            print("Allocation expired: {}".format(key))
            del allocations[key]
            allocation.close()
+
if __name__ == "__main__":
    random.seed()

    # BUG FIX: this used `platform.system() is "Windows"`. `is` compares
    # object identity and platform.system() returns a freshly-built string,
    # so the check was always False and Windows silently fell into the
    # loopback branch. Compare with == instead.
    if platform.system() == "Windows":
        # Windows is finicky about allowing real interfaces to talk to loopback.
        interface_4 = v4_address
        interface_6 = v6_address
        hostname = socket.gethostname()
    else:
        # Our linux builders do not have a hostname that resolves to the real
        # interface.
        interface_4 = "127.0.0.1"
        interface_6 = "::1"
        hostname = "localhost"

    # Standard STUN/TURN port, both transports.
    reactor.listenUDP(3478, UdpStunHandler(), interface=interface_4)
    reactor.listenTCP(3478, TcpStunHandlerFactory(), interface=interface_4)

    # IPv6 is best-effort: v6_address may be "" when there is no IPv6 route,
    # in which case these listens fail and are skipped.
    try:
        reactor.listenUDP(3478, UdpStunHandler(), interface=interface_6)
        reactor.listenTCP(3478, TcpStunHandlerFactory(), interface=interface_6)
    except Exception:
        pass

    # Reap expired allocations once per second.
    allocation_pruner = LoopingCall(prune_allocations)
    allocation_pruner.start(1)

    # Emit the ICE-server configuration consumed by the test harness.
    template = Template(
'[\
{"url":"stun:$hostname"}, \
{"url":"stun:$hostname?transport=tcp"}, \
{"username":"$user","credential":"$pwd","url":"turn:$hostname"}, \
{"username":"$user","credential":"$pwd","url":"turn:$hostname?transport=tcp"}]'
)

    print(template.substitute(user=turn_user,
                              pwd=turn_pass,
                              hostname=hostname))

    reactor.run()
+
diff --git a/testing/tools/mach_test_package_bootstrap.py b/testing/tools/mach_test_package_bootstrap.py
new file mode 100644
index 000000000..f7a8fb397
--- /dev/null
+++ b/testing/tools/mach_test_package_bootstrap.py
@@ -0,0 +1,194 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import print_function, unicode_literals
+
+import json
+import os
+import platform
+import sys
+import types
+
+
# Directories (relative to the extracted test package root) prepended to
# sys.path so the bundled harnesses and mozbase packages are importable.
SEARCH_PATHS = [
    'marionette',
    'marionette/marionette/runner/mixins/browsermob-proxy-py',
    'marionette/client',
    'mochitest',
    'mozbase/manifestparser',
    'mozbase/mozcrash',
    'mozbase/mozdebug',
    'mozbase/mozdevice',
    'mozbase/mozfile',
    'mozbase/mozhttpd',
    'mozbase/mozinfo',
    'mozbase/mozinstall',
    'mozbase/mozleak',
    'mozbase/mozlog',
    'mozbase/moznetwork',
    'mozbase/mozprocess',
    'mozbase/mozprofile',
    'mozbase/mozrunner',
    'mozbase/mozscreenshot',
    'mozbase/mozsystemmonitor',
    'mozbase/moztest',
    'mozbase/mozversion',
    'reftest',
    'tools/mach',
    'tools/wptserve',
    'xpcshell',
]

# Individual files providing mach commands.
MACH_MODULES = [
    'marionette/mach_test_package_commands.py',
    'mochitest/mach_test_package_commands.py',
    'reftest/mach_test_package_commands.py',
    'tools/mach/mach/commands/commandinfo.py',
    'xpcshell/mach_test_package_commands.py',
]


# Mach command categories; higher priority sorts earlier in `mach help`.
CATEGORIES = {
    'testing': {
        'short': 'Testing',
        'long': 'Run tests.',
        'priority': 30,
    },
    'devenv': {
        'short': 'Development Environment',
        'long': 'Set up and configure your development environment.',
        'priority': 20,
    },
    'misc': {
        'short': 'Potpourri',
        'long': 'Potent potables and assorted snacks.',
        'priority': 10,
    },
    'disabled': {
        'short': 'Disabled',
        'long': 'The disabled commands are hidden by default. Use -v to display them. '
                'These commands are unavailable for your current context, '
                'run "mach <command>" to see why.',
        'priority': 0,
    }
}
+
+
def ancestors(path, depth=0):
    """Emit the parent directories of a path.

    With depth=0 (the default) walk all the way to the filesystem root;
    with depth=N, yield at most N-1 entries (this mirrors the original
    counter semantics, where the counter started at 1).
    """
    step = 1
    current = path
    while current and step != depth:
        yield current
        parent = os.path.dirname(current)
        if parent == current:
            # dirname is a fixed point at the root -- stop.
            break
        current = parent
        step += 1
+
+
def find_firefox(context):
    """Try to automagically find the firefox binary."""
    import mozinstall

    # A mozharness config with an explicit binary path wins outright.
    config = context.mozharness_config
    if config and 'binary_path' in config:
        return config['binary_path']

    candidates = []
    if config:
        # mozharness setup without an explicit binary: look in its workdir.
        candidates.append(os.path.join(context.mozharness_workdir, 'application'))

    # Check for a test-stage layout: a dist/bin beside the package root.
    dist_bin = os.path.join(os.path.dirname(context.package_root), 'bin')
    if os.path.isdir(dist_bin):
        candidates.append(dist_bin)

    for candidate in candidates:
        try:
            return mozinstall.get_binary(candidate, 'firefox')
        except mozinstall.InvalidBinary:
            continue
+
+
def find_hostutils(context):
    """Return the extracted host-utils directory under the mozharness
    workdir, or None when none is present."""
    hostutils_root = os.path.join(context.mozharness_workdir, 'hostutils')
    for entry in os.listdir(hostutils_root):
        candidate = os.path.join(hostutils_root, entry)
        if entry.startswith('host-utils') and os.path.isdir(candidate):
            return candidate
+
+
def normalize_test_path(test_root, path):
    """Resolve *path* to a normalized absolute path.

    Absolute or directly-existing paths are returned as-is (normalized);
    otherwise each ancestor directory of *test_root* is tried as a base,
    returning the first combination that exists (None when nothing does).
    """
    if os.path.isabs(path) or os.path.exists(path):
        return os.path.normpath(os.path.abspath(path))

    for base in ancestors(test_root):
        candidate = os.path.join(base, path)
        if os.path.exists(candidate):
            return os.path.normpath(os.path.abspath(candidate))
+
+
def bootstrap(test_package_root):
    """Prepare sys.path for an extracted test package and return a configured
    mach.main.Mach instance with the package's mach commands loaded."""
    test_package_root = os.path.abspath(test_package_root)

    # Ensure we are running Python 2.7+. We put this check here so we generate a
    # user-friendly error message rather than a cryptic stack trace on module
    # import.
    if sys.version_info[0] != 2 or sys.version_info[1] < 7:
        print('Python 2.7 or above (but not Python 3) is required to run mach.')
        print('You are running Python', platform.python_version())
        sys.exit(1)

    # Prepend (not append) so the bundled copies win over anything installed.
    sys.path[0:0] = [os.path.join(test_package_root, path) for path in SEARCH_PATHS]
    import mach.main

    def populate_context(context, key=None):
        # key=None: one-time population of the static context attributes.
        if key is None:
            context.package_root = test_package_root
            context.bin_dir = os.path.join(test_package_root, 'bin')
            context.certs_dir = os.path.join(test_package_root, 'certs')
            context.module_dir = os.path.join(test_package_root, 'modules')
            context.ancestors = ancestors
            context.normalize_test_path = normalize_test_path
            return

        # The values for the following 'key's will be set lazily, and cached
        # after first being invoked.
        if key == 'firefox_bin':
            return find_firefox(context)

        if key == 'hostutils':
            return find_hostutils(context)

        if key == 'mozharness_config':
            # Walk upward looking for mozharness's saved config next to the
            # package; an empty dict means "not a mozharness setup".
            for dir_path in ancestors(context.package_root):
                mozharness_config = os.path.join(dir_path, 'logs', 'localconfig.json')
                if os.path.isfile(mozharness_config):
                    with open(mozharness_config, 'rb') as f:
                        return json.load(f)
            return {}

        if key == 'mozharness_workdir':
            config = context.mozharness_config
            if config:
                return os.path.join(config['base_work_dir'], config['work_dir'])

    # NOTE: rebinding 'mach' shadows the module name from here on.
    mach = mach.main.Mach(os.getcwd())
    mach.populate_context_handler = populate_context

    for category, meta in CATEGORIES.items():
        mach.define_category(category, meta['short'], meta['long'],
                             meta['priority'])

    for path in MACH_MODULES:
        cmdfile = os.path.join(test_package_root, path)

        # Depending on which test zips were extracted,
        # the command module might not exist
        if os.path.isfile(cmdfile):
            mach.load_commands_from_file(cmdfile)

    return mach
diff --git a/testing/tools/minidumpwriter/minidumpwriter.cpp b/testing/tools/minidumpwriter/minidumpwriter.cpp
new file mode 100644
index 000000000..1335006ff
--- /dev/null
+++ b/testing/tools/minidumpwriter/minidumpwriter.cpp
@@ -0,0 +1,59 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+/*
+ * Given a PID and a path to a target file, write a minidump of the
+ * corresponding process in that file. This is taken more or less
+ * verbatim from mozcrash and translated to C++ to avoid problems
+ * writing a minidump of 64 bit Firefox from a 32 bit python.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <windows.h>
+#include <dbghelp.h>
+
// Entry point: minidumpwriter <PID> <DUMP_FILE>
// Writes a MiniDumpNormal dump of the given process; returns 0 on success,
// 1 on usage error or any API failure.
int wmain(int argc, wchar_t** argv)
{
  if (argc != 3) {
    fprintf(stderr, "Usage: minidumpwriter <PID> <DUMP_FILE>\n");
    return 1;
  }

  // _wtoi returns 0 on parse failure, which the check below rejects
  // (DWORD is unsigned, so <= 0 is effectively == 0).
  DWORD pid = (DWORD) _wtoi(argv[1]);

  if (pid <= 0) {
    fprintf(stderr, "Usage: minidumpwriter <PID> <DUMP_FILE>\n");
    return 1;
  }

  wchar_t* dumpfile = argv[2];
  int rv = 1;
  // Read/query access is all MiniDumpWriteDump needs on the target process.
  HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                                0, pid);
  if (!hProcess) {
    fprintf(stderr, "Couldn't get handle for %d\n", pid);
    return rv;
  }

  // CREATE_ALWAYS: silently overwrite any stale dump at the same path.
  HANDLE file = CreateFileW(dumpfile, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                            FILE_ATTRIBUTE_NORMAL, nullptr);
  if (file == INVALID_HANDLE_VALUE) {
    fprintf(stderr, "Couldn't open dump file at %S\n", dumpfile);
    CloseHandle(hProcess);
    return rv;
  }

  rv = 0;
  // MiniDumpNormal: the smallest dump type with thread stacks -- enough for
  // crash-signature extraction.
  if (!MiniDumpWriteDump(hProcess, pid, file, MiniDumpNormal,
                         nullptr, nullptr, nullptr)) {
    fprintf(stderr, "Error 0x%X in MiniDumpWriteDump\n", GetLastError());
    rv = 1;
  }

  CloseHandle(file);
  CloseHandle(hProcess);
  return rv;
}
diff --git a/testing/tools/minidumpwriter/moz.build b/testing/tools/minidumpwriter/moz.build
new file mode 100644
index 000000000..21cf9b416
--- /dev/null
+++ b/testing/tools/minidumpwriter/moz.build
@@ -0,0 +1,19 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
# Only built for 64-bit Windows test builds: the whole point of this helper
# is writing a minidump of 64-bit Firefox from a 32-bit Python (see
# minidumpwriter.cpp).
if CONFIG['ENABLE_TESTS'] and CONFIG['CPU_ARCH'] == 'x86_64' and CONFIG['OS_ARCH'] == 'WINNT':
    Program('minidumpwriter')
    # MiniDumpWriteDump lives in dbghelp.dll.
    OS_LIBS += [
        'dbghelp',
    ]
    SOURCES += [
        'minidumpwriter.cpp',
    ]
    # Static CRT so the helper runs on test machines without the VC
    # redistributable installed.
    USE_STATIC_LIBS = True
    if CONFIG['GNU_CC']:
        # mingw requires -municode to link a wmain() entry point.
        WIN32_EXE_LDFLAGS += ['-municode']

NO_PGO = True
diff --git a/testing/tools/proxyserver/proxyserver.py b/testing/tools/proxyserver/proxyserver.py
new file mode 100644
index 000000000..d7b2ef161
--- /dev/null
+++ b/testing/tools/proxyserver/proxyserver.py
@@ -0,0 +1,269 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Caching HTTP Proxy for use with the Talos pageload tests
+Author: Rob Arnold
+
+This file implements a multithreaded caching http 1.1 proxy. HEAD and GET
+methods are supported; POST is not yet.
+
+Each incoming request is put onto a new thread; python does not have a thread
+pool library, so a new thread is spawned for each request. I have tried to use
+the python 2.4 standard library wherever possible.
+
+Caching:
+The cache is implemented in the Cache class. Items can only be added to the
+cache. The only way to remove items from the cache is to blow it all away,
+either by deleting the file (default: proxy_cache.db) or passing the -c or
+--clear-cache flags on the command line. It is technically possible to remove
+items individually from the cache, but there has been no need to do so so far.
+
+The cache is implemented with the shelve module. The key is the combination of
+host, port and request (path + params + fragment) and the values stored are the
+http status code, headers and content that were received from the remote server.
+
+Access to the cache is guarded by a semaphore which allows concurrent read
+access. The semaphore is guarded by a simple mutex which prevents a deadlock
+from occurring when two threads try to add an item to the cache at the same time.
+
+Memory usage is kept to a minimum by the shelve module; only items in the cache
+that are currently being served stay in memory.
+
+Proxy:
+The BaseHTTPServer.BaseHTTPRequestHandler takes care of parsing incoming
+requests and managing the socket connection. See the documentation of the
+BaseHTTPServer module for more information. When do_HEAD or do_GET is called,
+the url that we are supposed to fetch is in self.path.
+
+TODO:
+* Implement POST requests. This requires implementing the do_POST method and
+ passing the post data along.
+* Implement different cache policies
+* Added an interface to allow administrators to probe the cache and remove
+ items from the database and such.
+"""
+
+__version__ = "0.1"
+
+import os
+import sys
+import time
+import threading
+import shelve
+from optparse import OptionParser, OptionValueError
+
+import SocketServer
+import BaseHTTPServer
+import socket
+import httplib
+from urlparse import urlsplit, urlunsplit
+
class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Proxy handler: serves GET/HEAD from the cache, fetching from the
    origin server and caching the response on a miss (unless --localonly)."""
    server_version = "TalosProxy/" + __version__
    protocol_version = "HTTP/1.1"

    def do_GET(self):
        # send_head() emits the status line and headers and returns the body
        # (or None on error); for GET we additionally write the body.
        content = self.send_head()
        if content:
            try:
                self.wfile.write(content)
            except socket.error, e:
                if options.verbose:
                    print "Got socket error %s" % e
        #self.close_connection = 1
    def do_HEAD(self):
        # HEAD sends status + headers only; the returned body is discarded.
        self.send_head()

    def getHeaders(self):
        # Copy the incoming request headers into a plain dict.
        h = {}
        for name in self.headers.keys():
            h[name] = self.headers[name]

        return h

    def send_head(self, method="GET"):
        """Send the response status line and headers for self.path.

        Returns the response body so do_GET can write it, or None if an
        error response was sent (or the client socket failed).
        """
        o = urlsplit(self.path)

        #sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # Force the upstream connection to close and strip the hop-by-hop
        # keep-alive headers before forwarding the request.
        headers = self.getHeaders()
        for k in "Proxy-Connection", "Connection":
            if k in headers:
                headers[k] = "Close"
        if "Keep-Alive" in headers:
            del headers["Keep-Alive"]

        # The cache key request string is everything after the authority:
        # path + query + fragment.
        reqstring = urlunsplit(('','',o.path, o.query, o.fragment))

        if options.no_cache:
            cache_result = None
        else:
            cache_result = cache.get(o.hostname, o.port, reqstring)

        if not cache_result:
            if options.localonly:
                self.send_error(404, "Object not in cache")
                return None
            else:
                if options.verbose:
                    print "Object %s was not in the cache" % self.path
                conn = httplib.HTTPConnection(o.netloc)
                conn.request("GET", reqstring, headers=headers)
                res = conn.getresponse()

                content = res.read()
                conn.close()

                # From here on, headers is a list of (name, value) tuples.
                status, headers = res.status, res.getheaders()

                if not options.no_cache:
                    cache.add(o.hostname, o.port, reqstring, status, headers, content)
        else:
            status, headers, content = cache_result

        try:
            self.send_response(status)
            for name, value in headers:
                # kill the transfer-encoding header because we don't support it when
                # we send data to the client
                if name not in ('transfer-encoding',):
                    self.send_header(name, value)
            # NOTE(review): headers is a list of (name, value) tuples here, so
            # this membership test compares a string against tuples and is
            # always true -- a Content-Length header is always appended,
            # possibly duplicating one already sent above.  Confirm intent.
            if "Content-Length" not in headers:
                self.send_header("Content-Length", str(len(content)))
            self.end_headers()
        except socket.error, e:
            if options.verbose:
                print "Got socket error %s" % e
            return None
        return content
    def log_message(self, format, *args):
        # Suppress the default per-request logging unless -v was given.
        if options.verbose:
            BaseHTTPServer.BaseHTTPRequestHandler.log_message(self, format, *args)
+
class HTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """HTTP server that handles each incoming request on its own thread."""
    def __init__(self, address, handler):
        BaseHTTPServer.HTTPServer.__init__(self, address, handler)
+
class Cache(object):
    """Multithreaded cache uses the shelve module to store pages"""
    # 20 concurrent threads ought to be enough for one browser
    max_concurrency = 20
    def __init__(self, name='', max_concurrency=20):
        name = name or options.cache or "proxy_cache.db"
        self.name = name
        self.max_concurrency = max_concurrency
        self.entries = {}
        # Readers each take one semaphore slot; a writer takes all of them,
        # giving shared-read / exclusive-write access.  semlock serializes
        # slot acquisition so a writer grabbing every slot can't deadlock
        # against another acquirer (see the module docstring).
        self.sem = threading.Semaphore(self.max_concurrency)
        self.semlock = threading.Lock()
        # 'n' always creates a fresh, empty database; 'c' opens an existing
        # database, creating it if necessary.
        if options.clear_cache:
            flag = 'n'
        else:
            flag = 'c'
        self.db = shelve.DbfilenameShelf(name, flag)

    def __del__(self):
        # Close (and thereby flush) the shelf when the cache goes away.
        if hasattr(self, 'db'):
            self.db.close()

    def get_key(self, host, port, resource):
        # Cache entries are keyed by "host:port/resource".
        return '%s:%s/%s' % (host, port, resource)

    def get(self, host, port, resource):
        """Return the cached (status, headers, content) tuple, or None."""
        key = self.get_key(host, port, resource)
        self.semlock.acquire()
        self.sem.acquire()
        self.semlock.release()
        try:
            # has_key is the Python 2 spelling of "key in self.db".
            if not self.db.has_key(key):
                return None
            # returns status, headers, content
            return self.db[key]
        finally:
            self.sem.release()
    def add(self, host, port, resource, status, headers, content):
        """Store a response, taking every semaphore slot to exclude readers."""
        key = self.get_key(host, port, resource)
        self.semlock.acquire()
        for i in range(self.max_concurrency):
            self.sem.acquire()
        self.semlock.release()
        try:
            self.db[key] = (status, headers, content)
            self.db.sync()
        finally:
            for i in range(self.max_concurrency):
                self.sem.release()
+
class Options(object):
    """Default configuration; overridden via the command line
    (_parseOptions) or programmatically (configure_proxy)."""
    port = 8000                  # TCP port the proxy listens on
    localonly = False            # serve only from the cache; 404 on a miss
    clear_cache = False          # wipe the cache database on startup
    no_cache = False             # bypass the cache entirely
    cache = 'proxy_cache.db'     # filename of the shelve database
    verbose = False              # extra logging to stdout
+
def _parseOptions(args=None):
    """Parse command-line options into the module-global ``options``.

    Args:
        args: optional list of argument strings; defaults to sys.argv[1:]
              (the optparse default), preserving the original behavior.

    Returns:
        The parsed options object (also stored in the global).
    """
    def port_callback(option, opt, value, parser):
        # Valid TCP ports are 1-65535 inclusive.  The original check used
        # value < (2 ** 16 - 1), which wrongly rejected port 65535.
        if 0 < value <= 0xFFFF:
            setattr(parser.values, option.dest, value)
        else:
            raise OptionValueError("Port number is out of range")

    global options
    parser = OptionParser(version="Talos Proxy " + __version__)
    parser.add_option("-p", "--port", dest="port",
        help="The port to run the proxy server on", metavar="PORT", type="int",
        action="callback", callback=port_callback)
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
        help="Include additional debugging information")
    parser.add_option("-l", "--localonly", action="store_true", dest="localonly",
        help="Only serve pages from the local database")
    parser.add_option("-c", "--clear", action="store_true", dest="clear_cache",
        help="Clear the cache on startup")
    parser.add_option("-n", "--no-cache", action="store_true", dest="no_cache",
        help="Do not use a cache")
    parser.add_option("-u", "--use-cache", dest="cache",
        help="The filename of the cache to use", metavar="NAME.db")
    parser.set_defaults(verbose=Options.verbose,
                        port=Options.port,
                        localonly=Options.localonly,
                        clear_cache=Options.clear_cache,
                        no_cache=Options.no_cache,
                        cache=Options.cache)
    options, _unused_args = parser.parse_args(args)
    return options
+
def configure_proxy(**kwargs):
    """Configure the proxy server.

    This should be called before run_proxy.  It can be called afterwards,
    but note that it is not threadsafe and some options (namely port) will
    not take effect.

    Keyword arguments are set as attributes on a fresh Options instance,
    replacing any previously parsed/configured options.
    """
    # The original module kept the text above in a bare string *before* the
    # def, where Python discards it; it is now a real docstring.
    global options
    options = Options()
    for key in kwargs:
        setattr(options, key, kwargs[key])
+
def _run():
    # Create the module-global cache, then serve forever on options.port,
    # listening on all interfaces.  Runs until the process (or its daemon
    # thread) dies.
    global cache
    cache = Cache()
    server_address = ('', options.port)
    httpd = HTTPServer(server_address, HTTPRequestHandler)
    httpd.serve_forever()
+
def run_proxy():
    """Start the proxy; it runs on a separate daemon thread.

    (The original kept this text in a bare string above the def, where it
    is not attached to the function.)
    """
    thr = threading.Thread(target=_run)
    # now when we die, the daemon thread will die too
    # (thr.daemon is the modern spelling of the deprecated setDaemon()).
    thr.daemon = True
    thr.start()
+
if __name__ == '__main__':
    _parseOptions()
    try:
        run_proxy()
        # thr.join() doesn't terminate on keyboard interrupt
        # Sleep-loop keeps the main thread alive (and interruptible) while
        # the daemon server thread does the work.
        while 1: time.sleep(1)
    except KeyboardInterrupt:
        if options.verbose:
            print "Quittin' time..."

# Public API when used as a library.
__all__ = ['run_proxy', 'configure_proxy']
diff --git a/testing/tools/screenshot/gdk-screenshot.cpp b/testing/tools/screenshot/gdk-screenshot.cpp
new file mode 100644
index 000000000..3248c70fb
--- /dev/null
+++ b/testing/tools/screenshot/gdk-screenshot.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2009, The Mozilla Foundation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Mozilla Foundation nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY The Mozilla Foundation ''AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL The Mozilla Foundation BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contributors:
+ * Ted Mielczarek <ted.mielczarek@gmail.com>
+ * Karl Tomlinson <karlt+@karlt.net>
+ */
+/*
+ * gdk-screenshot.cpp: Save a screenshot of the root window in .png format.
+ * If a filename is specified as the first argument on the commandline,
+ * then the image will be saved to that filename. Otherwise, the image will
+ * be written to stdout.
+ */
+#include <gdk/gdk.h>
+#include <gdk/gdkx.h>
+#ifdef HAVE_LIBXSS
+#include <X11/extensions/scrnsaver.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+
+gboolean save_to_stdout(const gchar *buf, gsize count,
+ GError **error, gpointer data)
+{
+ size_t written = fwrite(buf, 1, count, stdout);
+ if (written != count) {
+ g_set_error(error, G_FILE_ERROR, g_file_error_from_errno(errno),
+ "Write to stdout failed: %s", g_strerror(errno));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+int main(int argc, char** argv)
+{
+ gdk_init(&argc, &argv);
+
+#if defined(HAVE_LIBXSS) && defined(MOZ_WIDGET_GTK)
+ int event_base, error_base;
+ Bool have_xscreensaver =
+ XScreenSaverQueryExtension(GDK_DISPLAY_XDISPLAY(gdk_display_get_default()),
+ &event_base, &error_base);
+
+ if (!have_xscreensaver) {
+ fprintf(stderr, "No XScreenSaver extension on display\n");
+ } else {
+ XScreenSaverInfo* info = XScreenSaverAllocInfo();
+ if (!info) {
+ fprintf(stderr, "%s: Out of memory\n", argv[0]);
+ return 1;
+ }
+ XScreenSaverQueryInfo(GDK_DISPLAY_XDISPLAY(gdk_display_get_default()),
+ GDK_ROOT_WINDOW(), info);
+
+ const char* state;
+ const char* til_or_since = nullptr;
+ switch (info->state) {
+ case ScreenSaverOff:
+ state = "Off";
+ til_or_since = "XScreenSaver will activate after another %lu seconds idle time\n";
+ break;
+ case ScreenSaverOn:
+ state = "On";
+ if (info->til_or_since) {
+ til_or_since = "XScreenSaver idle timer activated %lu seconds ago\n";
+ } else {
+ til_or_since = "XScreenSaver idle activation is disabled\n";
+ }
+ break;
+ case ScreenSaverDisabled:
+ state = "Disabled";
+ break;
+ default:
+ state = "unknown";
+ }
+
+ const char* kind;
+ switch (info->kind) {
+ case ScreenSaverBlanked:
+ kind = "Blanked";
+ break;
+ case ScreenSaverInternal:
+ state = "Internal";
+ break;
+ case ScreenSaverExternal:
+ state = "External";
+ break;
+ default:
+ state = "unknown";
+ }
+
+ fprintf(stderr, "XScreenSaver state: %s\n", state);
+
+ if (til_or_since) {
+ fprintf(stderr, "XScreenSaver kind: %s\n", kind);
+ fprintf(stderr, til_or_since, info->til_or_since / 1000);
+ }
+
+ fprintf(stderr, "User input has been idle for %lu seconds\n", info->idle / 1000);
+
+ XFree(info);
+ }
+#endif
+
+ GdkPixbuf* screenshot = nullptr;
+ GdkWindow* window = gdk_get_default_root_window();
+#if (MOZ_WIDGET_GTK == 2)
+ screenshot = gdk_pixbuf_get_from_drawable(nullptr, window, nullptr,
+ 0, 0, 0, 0,
+ gdk_screen_width(),
+ gdk_screen_height());
+#else
+ screenshot = gdk_pixbuf_get_from_window(window, 0, 0,
+ gdk_window_get_width(window),
+ gdk_window_get_height(window));
+#endif
+ if (!screenshot) {
+ fprintf(stderr, "%s: failed to create screenshot GdkPixbuf\n", argv[0]);
+ return 1;
+ }
+
+ GError* error = nullptr;
+ if (argc > 1) {
+ gdk_pixbuf_save(screenshot, argv[1], "png", &error, nullptr);
+ } else {
+ gdk_pixbuf_save_to_callback(screenshot, save_to_stdout, nullptr,
+ "png", &error, nullptr);
+ }
+ if (error) {
+ fprintf(stderr, "%s: failed to write screenshot as png: %s\n",
+ argv[0], error->message);
+ return error->code;
+ }
+
+ return 0;
+}
+
// These options are copied from mozglue/build/AsanOptions.cpp
#ifdef MOZ_ASAN
// Override ASan defaults for this short-lived tool: tolerate a user SEGV
// handler, don't abort on alloc/dealloc mismatch, and skip leak checking.
extern "C"
const char* __asan_default_options() {
  return "allow_user_segv_handler=1:alloc_dealloc_mismatch=0:detect_leaks=0";
}
#endif
diff --git a/testing/tools/screenshot/moz.build b/testing/tools/screenshot/moz.build
new file mode 100644
index 000000000..e9e0ebddf
--- /dev/null
+++ b/testing/tools/screenshot/moz.build
@@ -0,0 +1,26 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+if 'gtk' in CONFIG['MOZ_WIDGET_TOOLKIT'] and CONFIG['MOZ_X11']:
+ Program('screentopng')
+ SOURCES += [
+ 'gdk-screenshot.cpp',
+ ]
+ CXXFLAGS += CONFIG['TK_CFLAGS']
+ LDFLAGS += CONFIG['XLDFLAGS']
+ OS_LIBS += CONFIG['TK_LIBS']
+ OS_LIBS += CONFIG['XSS_LIBS']
+elif CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
+ Program('screenshot')
+ SOURCES += [
+ 'win32-screenshot.cpp',
+ ]
+ USE_STATIC_LIBS = True
+ if CONFIG['GNU_CC']:
+ WIN32_EXE_LDFLAGS += ['-municode']
+ OS_LIBS += [
+ 'gdiplus',
+ ]
diff --git a/testing/tools/screenshot/win32-screenshot.cpp b/testing/tools/screenshot/win32-screenshot.cpp
new file mode 100644
index 000000000..0a357ef2f
--- /dev/null
+++ b/testing/tools/screenshot/win32-screenshot.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2009, The Mozilla Foundation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Mozilla Foundation nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY The Mozilla Foundation ''AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL The Mozilla Foundation BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contributors:
+ * Ted Mielczarek <ted.mielczarek@gmail.com>
+ */
+/*
+ * win32-screenshot.cpp: Save a screenshot of the Windows desktop in .png format.
+ * If a filename is specified as the first argument on the commandline,
+ * then the image will be saved to that filename. Otherwise, the image will
+ * be saved as "screenshot.png" in the current working directory.
+ */
+
+ // VS2015: Platform SDK 8.1's GdiplusTypes.h uses the min macro
+#undef NOMINMAX
+#undef WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <gdiplus.h>
+
+// Link w/ subsystem windows so we don't get a console when executing
+// this binary.
+#pragma comment(linker, "/SUBSYSTEM:windows /ENTRY:wmainCRTStartup")
+
+using namespace Gdiplus;
+
// From http://msdn.microsoft.com/en-us/library/ms533843%28VS.85%29.aspx
// Look up the GDI+ image encoder whose MIME type matches |format| (e.g.
// L"image/png") and store its CLSID in |*pClsid|.  Returns the encoder's
// index (>= 0) on success, or -1 on failure.
static int GetEncoderClsid(const WCHAR* format, CLSID* pClsid)
{
   UINT  num = 0;          // number of image encoders
   UINT  size = 0;         // size of the image encoder array in bytes

   ImageCodecInfo* pImageCodecInfo = nullptr;

   GetImageEncodersSize(&num, &size);
   if(size == 0)
      return -1;  // Failure

   // The encoder list has a variable-sized layout, so it is queried into a
   // raw malloc'd buffer of the size GDI+ reported.
   pImageCodecInfo = (ImageCodecInfo*)(malloc(size));
   if(pImageCodecInfo == nullptr)
      return -1;  // Failure

   GetImageEncoders(num, size, pImageCodecInfo);

   for(UINT j = 0; j < num; ++j)
   {
      if( wcscmp(pImageCodecInfo[j].MimeType, format) == 0 )
      {
         *pClsid = pImageCodecInfo[j].Clsid;
         free(pImageCodecInfo);
         return j;  // Success
      }
   }

   free(pImageCodecInfo);
   return -1;  // Failure
}
+
#ifdef __MINGW32__
extern "C"
#endif
// Capture the desktop into a bitmap and save it as PNG to argv[1]
// (default "screenshot.png").  Returns 0 on success, 1 on any failure.
int wmain(int argc, wchar_t** argv)
{
  // GDI+ must be started before any GDI+ call and shut down afterwards.
  GdiplusStartupInput gdiplusStartupInput;
  ULONG_PTR gdiplusToken;
  GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, nullptr);

  // Blit the screen into a compatible bitmap.  CAPTUREBLT also captures
  // layered (transparent) windows.
  // NOTE(review): SM_CXSCREEN/SM_CYSCREEN describe the primary monitor
  // only -- multi-monitor desktops are not fully captured; confirm intent.
  HWND desktop = GetDesktopWindow();
  HDC desktopdc = GetDC(desktop);
  HDC mydc = CreateCompatibleDC(desktopdc);
  int width = GetSystemMetrics(SM_CXSCREEN);
  int height = GetSystemMetrics(SM_CYSCREEN);
  HBITMAP mybmp = CreateCompatibleBitmap(desktopdc, width, height);
  HBITMAP oldbmp = (HBITMAP)SelectObject(mydc, mybmp);
  BitBlt(mydc,0,0,width,height,desktopdc,0,0, SRCCOPY|CAPTUREBLT);
  SelectObject(mydc, oldbmp);

  // Encode the bitmap to PNG via the GDI+ PNG encoder.
  const wchar_t* filename = (argc > 1) ? argv[1] : L"screenshot.png";
  Bitmap* b = Bitmap::FromHBITMAP(mybmp, nullptr);
  CLSID encoderClsid;
  Status stat = GenericError;
  if (b && GetEncoderClsid(L"image/png", &encoderClsid) != -1) {
    stat = b->Save(filename, &encoderClsid, nullptr);
  }
  // The GDI+ Bitmap must be destroyed before GdiplusShutdown below.
  if (b)
    delete b;

  // cleanup
  GdiplusShutdown(gdiplusToken);
  ReleaseDC(desktop, desktopdc);
  DeleteObject(mybmp);
  DeleteDC(mydc);
  return stat == Ok ? 0 : 1;
}
diff --git a/testing/tools/websocketprocessbridge/websocketprocessbridge.py b/testing/tools/websocketprocessbridge/websocketprocessbridge.py
new file mode 100644
index 000000000..57bab31a4
--- /dev/null
+++ b/testing/tools/websocketprocessbridge/websocketprocessbridge.py
@@ -0,0 +1,106 @@
+# vim: set ts=4 et sw=4 tw=80
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from twisted.internet import protocol, reactor
+from twisted.internet.task import LoopingCall
+import txws
+import psutil
+
+import argparse
+import sys
+import os
+
# maps a command issued via websocket to running an executable with args
# NOTE(review): keys are str; if the websocket payload arrives as bytes
# (Python 3), the lookup in SocketSide.dataReceived would KeyError --
# verify the transport delivers text.
commands = {
    'iceserver' : [sys.executable,
                   "-u",
                   os.path.join("iceserver", "iceserver.py")]
}
+
class ProcessSide(protocol.ProcessProtocol):
    """Handles the spawned process (I/O, process termination)"""

    def __init__(self, socketSide):
        # The SocketSide we bridge to; cleared (None) once the websocket
        # has gone away.
        self.socketSide = socketSide

    def outReceived(self, data):
        # Forward each line of the child's stdout over the websocket,
        # one write per line.
        if self.socketSide:
            lines = data.splitlines()
            for line in lines:
                self.socketSide.transport.write(line)

    def errReceived(self, data):
        # Child stderr is forwarded the same way as stdout.
        self.outReceived(data)

    def processEnded(self, reason):
        # Report the termination reason to the peer, then tear down the
        # websocket side.
        if self.socketSide:
            self.outReceived(str(reason))
            self.socketSide.processGone()

    def socketGone(self):
        # Called by SocketSide when the websocket closes: detach and kill
        # the child process.
        self.socketSide = None
        self.transport.loseConnection()
        self.transport.signalProcess("KILL")
+
+
class SocketSide(protocol.Protocol):
    """
    Handles the websocket (I/O, closed connection), and spawning the process
    """

    def __init__(self):
        # Set once the first message has spawned a child process.
        self.processSide = None

    def dataReceived(self, data):
        # The first message names the command to launch; subsequent
        # messages are ignored once a process is running.
        if not self.processSide:
            self.processSide = ProcessSide(self)
            # We deliberately crash if |data| isn't on the "menu",
            # or there is some problem spawning.
            reactor.spawnProcess(self.processSide,
                                 commands[data][0],
                                 commands[data],
                                 env=os.environ)

    def connectionLost(self, reason):
        # Websocket closed: make sure the child process is killed too.
        if self.processSide:
            self.processSide.socketGone()

    def processGone(self):
        # Child exited: close our side of the websocket as well.
        self.processSide = None
        self.transport.loseConnection()
+
+
class ProcessSocketBridgeFactory(protocol.Factory):
    """Builds sockets that can launch/bridge to a process"""

    def buildProtocol(self, addr):
        # One SocketSide per incoming websocket connection.
        return SocketSide()
+
# Parent process could have already exited, so this is slightly racy. Only
# alternative is to set up a pipe between parent and child, but that requires
# special cooperation from the parent.
parent_process = psutil.Process(os.getpid()).parent()

def check_parent():
    """ Checks if parent process is still alive, and exits if not """
    # Called once a second by the LoopingCall set up in __main__; stopping
    # the reactor ends the program.
    if not parent_process.is_running():
        print("websocket/process bridge exiting because parent process is gone")
        reactor.stop()
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Starts websocket/process bridge.')
    parser.add_argument("--port", type=str, dest="port", default="8191",
                        help="Port for websocket/process bridge. Default 8191.")
    args = parser.parse_args()

    # Poll once a second so we shut down promptly when the parent dies.
    parent_checker = LoopingCall(check_parent)
    parent_checker.start(1)

    # Wrap the raw bridge factory in txws so it speaks the websocket
    # protocol, and run the twisted reactor until stopped.
    bridgeFactory = ProcessSocketBridgeFactory()
    reactor.listenTCP(int(args.port), txws.WebSocketFactory(bridgeFactory))
    print("websocket/process bridge listening on port %s" % args.port)
    reactor.run()
+
+
diff --git a/testing/tools/websocketprocessbridge/websocketprocessbridge_requirements.txt b/testing/tools/websocketprocessbridge/websocketprocessbridge_requirements.txt
new file mode 100644
index 000000000..51e548094
--- /dev/null
+++ b/testing/tools/websocketprocessbridge/websocketprocessbridge_requirements.txt
@@ -0,0 +1,13 @@
+twisted==10.2.0
+
+# websocket adapter for twisted, might be built into twisted someday
+txws==0.9.1
+
+psutil==3.1.1
+
+# needed by txws, but pypi doesn't know about it
+six==1.10.0
+
+# Needed by iceserver
+ipaddr==2.1.11
+passlib==1.6.5