Diffstat (limited to 'media/webrtc/trunk/build/android/pylib')
-rw-r--r--  media/webrtc/trunk/build/android/pylib/__init__.py | 4
-rw-r--r--  media/webrtc/trunk/build/android/pylib/android_commands.py | 1071
-rw-r--r--  media/webrtc/trunk/build/android/pylib/apk_info.py | 142
-rw-r--r--  media/webrtc/trunk/build/android/pylib/base_test_runner.py | 210
-rw-r--r--  media/webrtc/trunk/build/android/pylib/base_test_sharder.py | 113
-rw-r--r--  media/webrtc/trunk/build/android/pylib/buildbot_report.py | 46
-rw-r--r--  media/webrtc/trunk/build/android/pylib/chrome_test_server_spawner.py | 402
-rw-r--r--  media/webrtc/trunk/build/android/pylib/cmd_helper.py | 50
-rw-r--r--  media/webrtc/trunk/build/android/pylib/constants.py | 47
-rw-r--r--  media/webrtc/trunk/build/android/pylib/debug_info.py | 196
-rw-r--r--  media/webrtc/trunk/build/android/pylib/device_stats_monitor.html | 143
-rw-r--r--  media/webrtc/trunk/build/android/pylib/device_stats_monitor.py | 116
-rw-r--r--  media/webrtc/trunk/build/android/pylib/fake_dns.py | 63
-rw-r--r--  media/webrtc/trunk/build/android/pylib/flag_changer.py | 144
-rw-r--r--  media/webrtc/trunk/build/android/pylib/forwarder.py | 198
-rw-r--r--  media/webrtc/trunk/build/android/pylib/io_stats_parser.py | 32
-rw-r--r--  media/webrtc/trunk/build/android/pylib/java_unittest_utils.py | 27
-rw-r--r--  media/webrtc/trunk/build/android/pylib/json_perf_parser.py | 160
-rw-r--r--  media/webrtc/trunk/build/android/pylib/perf_tests_helper.py | 120
-rw-r--r--  media/webrtc/trunk/build/android/pylib/ports.py | 167
-rw-r--r--  media/webrtc/trunk/build/android/pylib/python_test_base.py | 168
-rw-r--r--  media/webrtc/trunk/build/android/pylib/python_test_caller.py | 84
-rw-r--r--  media/webrtc/trunk/build/android/pylib/python_test_sharder.py | 203
-rw-r--r--  media/webrtc/trunk/build/android/pylib/run_java_tests.py | 591
-rw-r--r--  media/webrtc/trunk/build/android/pylib/run_python_tests.py | 207
-rw-r--r--  media/webrtc/trunk/build/android/pylib/run_tests_helper.py | 26
-rw-r--r--  media/webrtc/trunk/build/android/pylib/sharded_tests_queue.py | 35
-rw-r--r--  media/webrtc/trunk/build/android/pylib/single_test_runner.py | 343
-rw-r--r--  media/webrtc/trunk/build/android/pylib/test_info_collection.py | 137
-rw-r--r--  media/webrtc/trunk/build/android/pylib/test_options_parser.py | 143
-rw-r--r--  media/webrtc/trunk/build/android/pylib/test_package.py | 200
-rw-r--r--  media/webrtc/trunk/build/android/pylib/test_package_apk.py | 121
-rw-r--r--  media/webrtc/trunk/build/android/pylib/test_package_executable.py | 167
-rw-r--r--  media/webrtc/trunk/build/android/pylib/test_result.py | 193
-rw-r--r--  media/webrtc/trunk/build/android/pylib/tests_annotations.py | 89
-rw-r--r--  media/webrtc/trunk/build/android/pylib/valgrind_tools.py | 255
36 files changed, 6413 insertions(+), 0 deletions(-)
diff --git a/media/webrtc/trunk/build/android/pylib/__init__.py b/media/webrtc/trunk/build/android/pylib/__init__.py
new file mode 100644
index 000000000..727e987e6
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/media/webrtc/trunk/build/android/pylib/android_commands.py b/media/webrtc/trunk/build/android/pylib/android_commands.py
new file mode 100644
index 000000000..8a1562caa
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/android_commands.py
@@ -0,0 +1,1071 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides an interface to communicate with the device via the adb command.
+
+Assumes adb binary is currently on system path.
+"""
+
+import collections
+import datetime
+import logging
+import os
+import re
+import shlex
+import subprocess
+import sys
+import tempfile
+import time
+
+import io_stats_parser
+from pylib import pexpect
+
+CHROME_SRC = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)), '..', '..', '..')
+
+sys.path.append(os.path.join(CHROME_SRC, 'third_party', 'android_testrunner'))
+import adb_interface
+
+import cmd_helper
+import errors # is under ../../../third_party/android_testrunner/errors.py
+
+
+# Pattern to search for the next whole line of pexpect output and capture it
+# into a match group. We can't use ^ and $ for line start and end with
+# pexpect; see http://www.noah.org/python/pexpect/#doc for the explanation.
+PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
+
+# Set the adb shell prompt to be a unique marker that will [hopefully] not
+# appear at the start of any line of a command's output.
+SHELL_PROMPT = '~+~PQ\x17RS~+~'
+
+# Java properties file
+LOCAL_PROPERTIES_PATH = '/data/local.prop'
+
+# Property in /data/local.prop that controls Java assertions.
+JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
+
+MEMORY_INFO_RE = re.compile('^(?P<key>\w+):\s+(?P<usage_kb>\d+) kB$')
+NVIDIA_MEMORY_INFO_RE = re.compile('^\s*(?P<user>\S+)\s*(?P<name>\S+)\s*'
+ '(?P<pid>\d+)\s*(?P<usage_bytes>\d+)$')
+
+# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
+KEYCODE_HOME = 3
+KEYCODE_BACK = 4
+KEYCODE_DPAD_UP = 19
+KEYCODE_DPAD_DOWN = 20
+KEYCODE_DPAD_RIGHT = 22
+KEYCODE_ENTER = 66
+KEYCODE_MENU = 82
+
+MD5SUM_DEVICE_PATH = '/data/local/tmp/md5sum_bin'
+
+def GetEmulators():
+ """Returns a list of emulators. Does not filter by status (e.g. offline).
+
+ In the example output below, both entries starting with 'emulator' will be
+ returned:
+
+ * daemon not running. starting it now on port 5037 *
+ * daemon started successfully *
+ List of devices attached
+ 027c10494100b4d7 device
+ emulator-5554 offline
+ emulator-5558 device
+ """
+ re_device = re.compile('^emulator-[0-9]+', re.MULTILINE)
+ devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
+ return devices
+
+
+def GetAVDs():
+ """Returns a list of AVDs."""
+ re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
+ avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
+ return avds
+
+
+def GetAttachedDevices():
+ """Returns a list of attached, online android devices.
+
+ If a preferred device has been set with ANDROID_SERIAL, it will be first in
+ the returned list.
+
+ Example output:
+
+ * daemon not running. starting it now on port 5037 *
+ * daemon started successfully *
+ List of devices attached
+ 027c10494100b4d7 device
+ emulator-5554 offline
+ """
+ re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
+ devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
+ preferred_device = os.environ.get('ANDROID_SERIAL')
+ if preferred_device in devices:
+ devices.remove(preferred_device)
+ devices.insert(0, preferred_device)
+ return devices
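+
+# A minimal usage sketch (illustrative only; assumes adb is on the PATH and
+# at least one device is attached):
+#   devices = GetAttachedDevices()
+#   if devices:
+#     adb = AndroidCommands(device=devices[0])
+#     print adb.RunShellCommand('getprop ro.build.id')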
+
+def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
+ """Gets a list of files from `ls` command output.
+
+ Python's os.walk isn't used because it doesn't work over adb shell.
+
+ Args:
+ path: The path to list.
+ ls_output: A list of lines returned by an `ls -lR` command.
+ re_file: A compiled regular expression which parses a line into named groups
+ consisting of at minimum "filename", "date", "time", "size" and
+ optionally "timezone".
+ utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
+ 2-digit string giving the number of UTC offset hours, and MM is a
+ 2-digit string giving the number of UTC offset minutes. If the input
+ utc_offset is None, will try to look for the value of "timezone" if it
+ is specified in re_file.
+
+ Returns:
+ A dict of {"name": (size, lastmod), ...} where:
+ name: The file name relative to |path|'s directory.
+ size: The file size in bytes (0 for directories).
+ lastmod: The file last modification date in UTC.
+ """
+ re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
+ path_dir = os.path.dirname(path)
+
+ current_dir = ''
+ files = {}
+ for line in ls_output:
+ directory_match = re_directory.match(line)
+ if directory_match:
+ current_dir = directory_match.group('dir')
+ continue
+ file_match = re_file.match(line)
+ if file_match:
+ filename = os.path.join(current_dir, file_match.group('filename'))
+ if filename.startswith(path_dir):
+ filename = filename[len(path_dir)+1:]
+ lastmod = datetime.datetime.strptime(
+ file_match.group('date') + ' ' + file_match.group('time')[:5],
+ '%Y-%m-%d %H:%M')
+ if not utc_offset and 'timezone' in re_file.groupindex:
+ utc_offset = file_match.group('timezone')
+ if isinstance(utc_offset, str) and len(utc_offset) == 5:
+ utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
+ minutes=int(utc_offset[3:5]))
+ if utc_offset[0:1] == '-':
+ utc_delta = -utc_delta
+ lastmod -= utc_delta
+ files[filename] = (int(file_match.group('size')), lastmod)
+ return files
+
+def _ComputeFileListHash(md5sum_output):
+ """Returns a list of MD5 strings from the provided md5sum output."""
+ return [line.split(' ')[0] for line in md5sum_output]
+
+def _HasAdbPushSucceeded(command_output):
+ """Returns whether adb push has succeeded from the provided output."""
+ if not command_output:
+ return False
+ # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
+ # Errors look like this: "failed to copy ... "
+ if not re.search('^[0-9]', command_output.splitlines()[-1]):
+ logging.critical('PUSH FAILED: ' + command_output)
+ return False
+ return True
+
+def GetLogTimestamp(log_line, year):
+ """Returns the timestamp of the given |log_line| in the given year."""
+ try:
+ return datetime.datetime.strptime('%s-%s' % (year, log_line[:18]),
+ '%Y-%m-%d %H:%M:%S.%f')
+ except (ValueError, IndexError):
+ logging.critical('Error reading timestamp from ' + log_line)
+ return None
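+
+# For example, given a logcat 'threadtime' line starting with
+# '06-21 14:35:01.123', the following would hold (a sketch, assuming the
+# device year is 2012):
+#   GetLogTimestamp('06-21 14:35:01.123 ...', 2012)
+#     == datetime.datetime(2012, 6, 21, 14, 35, 1, 123000)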
+
+
+class AndroidCommands(object):
+ """Helper class for communicating with Android device via adb.
+
+ Args:
+ device: If given, adb commands are only sent to the device with this ID.
+ Otherwise commands are sent to all attached devices.
+ """
+
+ def __init__(self, device=None):
+ self._adb = adb_interface.AdbInterface()
+ if device:
+ self._adb.SetTargetSerial(device)
+ self._logcat = None
+ self.logcat_process = None
+ self._pushed_files = []
+ self._device_utc_offset = self.RunShellCommand('date +%z')[0]
+ self._md5sum_path = ''
+ self._external_storage = ''
+
+ def Adb(self):
+ """Returns our AdbInterface to avoid us wrapping all its methods."""
+ return self._adb
+
+ def IsRootEnabled(self):
+ """Checks if root is enabled on the device."""
+ root_test_output = self.RunShellCommand('ls /root') or ['']
+ return not 'Permission denied' in root_test_output[0]
+
+ def EnableAdbRoot(self):
+ """Enables adb root on the device.
+
+ Returns:
+ True: if output from executing adb root was as expected.
+ False: otherwise.
+ """
+ return_value = self._adb.EnableAdbRoot()
+ # EnableAdbRoot inserts a wait-for-device call only when the adb logcat
+ # output matches what is expected. Just to be safe, add an explicit
+ # wait-for-device call here.
+ self._adb.SendCommand('wait-for-device')
+ return return_value
+
+ def GetDeviceYear(self):
+ """Returns the year information of the date on device."""
+ return self.RunShellCommand('date +%Y')[0]
+
+ def GetExternalStorage(self):
+ if not self._external_storage:
+ self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
+ assert self._external_storage, 'Unable to find $EXTERNAL_STORAGE'
+ return self._external_storage
+
+ def WaitForDevicePm(self):
+ """Blocks until the device's package manager is available.
+
+ To work around http://b/5201039, we restart the shell and retry if the
+ package manager isn't back after 120 seconds.
+
+ Raises:
+ errors.WaitForResponseTimedOutError after max retries reached.
+ """
+ last_err = None
+ retries = 3
+ while retries:
+ try:
+ self._adb.WaitForDevicePm()
+ return # Success
+ except errors.WaitForResponseTimedOutError as e:
+ last_err = e
+ logging.warning('Restarting and retrying after timeout: %s', e)
+ retries -= 1
+ self.RestartShell()
+ raise last_err # Only reached after max retries, re-raise the last error.
+
+ def RestartShell(self):
+ """Restarts the shell on the device. Does not block for it to return."""
+ self.RunShellCommand('stop')
+ self.RunShellCommand('start')
+
+ def Reboot(self, full_reboot=True):
+ """Reboots the device and waits for the package manager to return.
+
+ Args:
+ full_reboot: Whether to fully reboot the device or just restart the shell.
+ """
+ # TODO(torne): hive can't reboot the device either way without breaking the
+ # connection; work out if we can handle this better
+ if os.environ.get('USING_HIVE'):
+ logging.warning('Ignoring reboot request as we are on hive')
+ return
+ if full_reboot or not self.IsRootEnabled():
+ self._adb.SendCommand('reboot')
+ timeout = 300
+ else:
+ self.RestartShell()
+ timeout = 120
+ # To run tests we need at least the package manager and the sd card (or
+ # other external storage) to be ready.
+ self.WaitForDevicePm()
+ self.WaitForSdCardReady(timeout)
+
+ def Uninstall(self, package):
+ """Uninstalls the specified package from the device.
+
+ Args:
+ package: Name of the package to remove.
+
+ Returns:
+ A status string returned by adb uninstall
+ """
+ uninstall_command = 'uninstall %s' % package
+
+ logging.info('>>> $' + uninstall_command)
+ return self._adb.SendCommand(uninstall_command, timeout_time=60)
+
+ def Install(self, package_file_path, reinstall=False):
+ """Installs the specified package to the device.
+
+ Args:
+ package_file_path: Path to .apk file to install.
+ reinstall: Reinstall an existing apk, keeping the data.
+
+ Returns:
+ A status string returned by adb install
+ """
+ assert os.path.isfile(package_file_path), ('<%s> is not a file' %
+ package_file_path)
+
+ install_cmd = ['install']
+
+ if reinstall:
+ install_cmd.append('-r')
+
+ install_cmd.append(package_file_path)
+ install_cmd = ' '.join(install_cmd)
+
+ logging.info('>>> $' + install_cmd)
+ return self._adb.SendCommand(install_cmd, timeout_time=2*60, retry_count=0)
+
+ def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
+ reboots_on_failure=2):
+ """Installs specified package and reboots device on timeouts.
+
+ Args:
+ apk_path: Path to .apk file to install.
+ keep_data: Reinstalls instead of uninstalling first, preserving the
+ application data.
+ package_name: Package name (only needed if keep_data=False).
+ reboots_on_failure: Number of times to reboot if the package manager is
+ frozen.
+
+ Returns:
+ A status string returned by adb install
+ """
+ reboots_left = reboots_on_failure
+ while True:
+ try:
+ if not keep_data:
+ assert package_name
+ self.Uninstall(package_name)
+ install_status = self.Install(apk_path, reinstall=keep_data)
+ if 'Success' in install_status:
+ return install_status
+ except errors.WaitForResponseTimedOutError:
+ print '@@@STEP_WARNINGS@@@'
+ logging.info('Timeout on installing %s' % apk_path)
+
+ if reboots_left <= 0:
+ raise Exception('Install failure')
+
+ # Force a hard reboot on last attempt
+ self.Reboot(full_reboot=(reboots_left == 1))
+ reboots_left -= 1
+
+ def MakeSystemFolderWritable(self):
+ """Remounts the /system folder rw."""
+ out = self._adb.SendCommand('remount')
+ if out.strip() != 'remount succeeded':
+ raise errors.MsgException('Remount failed: %s' % out)
+
+ def RestartAdbServer(self):
+ """Restart the adb server."""
+ self.KillAdbServer()
+ self.StartAdbServer()
+
+ def KillAdbServer(self):
+ """Kill adb server."""
+ adb_cmd = ['adb', 'kill-server']
+ return cmd_helper.RunCmd(adb_cmd)
+
+ def StartAdbServer(self):
+ """Start adb server."""
+ adb_cmd = ['adb', 'start-server']
+ return cmd_helper.RunCmd(adb_cmd)
+
+ def WaitForSystemBootCompleted(self, wait_time):
+ """Waits for targeted system's boot_completed flag to be set.
+
+ Args:
+ wait_time: time in seconds to wait
+
+ Raises:
+ WaitForResponseTimedOutError if wait_time elapses and flag still not
+ set.
+ """
+ logging.info('Waiting for system boot completed...')
+ self._adb.SendCommand('wait-for-device')
+ # The device is visible again, but the system may not have finished
+ # booting. Poll the sys.boot_completed property until it is set.
+ boot_completed = False
+ attempts = 0
+ wait_period = 5
+ while not boot_completed and (attempts * wait_period) < wait_time:
+ output = self._adb.SendShellCommand('getprop sys.boot_completed',
+ retry_count=1)
+ output = output.strip()
+ if output == '1':
+ boot_completed = True
+ else:
+ # If 'error: xxx' is returned when querying the flag, the adb server
+ # has lost its connection to the emulator, so restart the adb server.
+ if 'error:' in output:
+ self.RestartAdbServer()
+ time.sleep(wait_period)
+ attempts += 1
+ if not boot_completed:
+ raise errors.WaitForResponseTimedOutError(
+ 'sys.boot_completed flag was not set after %s seconds' % wait_time)
+
+ def WaitForSdCardReady(self, timeout_time):
+ """Wait for the SD card ready before pushing data into it."""
+ logging.info('Waiting for SD card ready...')
+ sdcard_ready = False
+ attempts = 0
+ wait_period = 5
+ external_storage = self.GetExternalStorage()
+ while not sdcard_ready and attempts * wait_period < timeout_time:
+ output = self.RunShellCommand('ls ' + external_storage)
+ if output:
+ sdcard_ready = True
+ else:
+ time.sleep(wait_period)
+ attempts += 1
+ if not sdcard_ready:
+ raise errors.WaitForResponseTimedOutError(
+ 'SD card not ready after %s seconds' % timeout_time)
+
+ # It is tempting to turn this function into a generator, however this is not
+ # possible without using a private (local) adb_shell instance (to ensure no
+ # other command interleaves usage of it), which would defeat the main aim of
+ # being able to reuse the adb shell instance across commands.
+ def RunShellCommand(self, command, timeout_time=20, log_result=False):
+ """Send a command to the adb shell and return the result.
+
+ Args:
+ command: String containing the shell command to send. Must not include
+ the single quotes as we use them to escape the whole command.
+ timeout_time: Number of seconds to wait for command to respond before
+ retrying, used by AdbInterface.SendShellCommand.
+ log_result: Boolean to indicate whether we should log the result of the
+ shell command.
+
+ Returns:
+ list containing the lines of output received from running the command
+ """
+ logging.info('>>> $' + command)
+ if "'" in command: logging.warning(command + " contains ' quotes")
+ result = self._adb.SendShellCommand(
+ "'%s'" % command, timeout_time).splitlines()
+ if ['error: device not found'] == result:
+ raise errors.DeviceUnresponsiveError('device not found')
+ if log_result:
+ logging.info('\n>>> '.join(result))
+ return result
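+
+ # Usage sketch (illustrative; |adb| is an AndroidCommands instance). Each
+ # line of output is one list element:
+ #   for line in adb.RunShellCommand('ls /data/local/tmp'):
+ #     print line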
+
+ def KillAll(self, process):
+ """Android version of killall, connected via adb.
+
+ Args:
+ process: name of the process to kill off
+
+ Returns:
+ the number of processes killed
+ """
+ pids = self.ExtractPid(process)
+ if pids:
+ self.RunShellCommand('kill ' + ' '.join(pids))
+ return len(pids)
+
+ def KillAllBlocking(self, process, timeout_sec):
+ """Blocking version of killall, connected via adb.
+
+ This waits until no process matching |process| appears in the output of
+ ps anymore.
+
+ Args:
+ process: name of the process to kill off
+ timeout_sec: the timeout in seconds
+
+ Returns:
+ the number of processes killed
+ """
+ processes_killed = self.KillAll(process)
+ if processes_killed:
+ elapsed = 0
+ wait_period = 0.1
+ # Note that this doesn't take into account the time spent in ExtractPid().
+ while self.ExtractPid(process) and elapsed < timeout_sec:
+ time.sleep(wait_period)
+ elapsed += wait_period
+ if elapsed >= timeout_sec:
+ return 0
+ return processes_killed
+
+ def StartActivity(self, package, activity, wait_for_completion=False,
+ action='android.intent.action.VIEW',
+ category=None, data=None,
+ extras=None, trace_file_name=None):
+ """Starts |package|'s activity on the device.
+
+ Args:
+ package: Name of package to start (e.g. 'com.google.android.apps.chrome').
+ activity: Name of activity (e.g. '.Main' or
+ 'com.google.android.apps.chrome.Main').
+ wait_for_completion: wait for the activity to finish launching (-W flag).
+ action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
+ category: string (e.g. "android.intent.category.HOME")
+ data: Data string to pass to activity (e.g. 'http://www.example.com/').
+ extras: Dict of extras to pass to the activity. The type of each value
+ determines whether it is passed with --es, --ez or --ei.
+ trace_file_name: If used, turns on and saves the trace to this file name.
+ """
+ cmd = 'am start -a %s' % action
+ if wait_for_completion:
+ cmd += ' -W'
+ if category:
+ cmd += ' -c %s' % category
+ if package and activity:
+ cmd += ' -n %s/%s' % (package, activity)
+ if data:
+ cmd += ' -d "%s"' % data
+ if extras:
+ for key in extras:
+ value = extras[key]
+ if isinstance(value, str):
+ cmd += ' --es'
+ elif isinstance(value, bool):
+ cmd += ' --ez'
+ elif isinstance(value, int):
+ cmd += ' --ei'
+ else:
+ raise NotImplementedError(
+ 'Need to teach StartActivity how to pass %s extras' % type(value))
+ cmd += ' %s %s' % (key, value)
+ if trace_file_name:
+ cmd += ' --start-profiler ' + trace_file_name
+ self.RunShellCommand(cmd)
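+
+ # Illustrative call (a sketch; assumes Chrome for Android is installed):
+ #   adb.StartActivity('com.google.android.apps.chrome', '.Main',
+ #                     data='http://www.example.com/')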
+
+ def GoHome(self):
+ """Tell the device to return to the home screen. Blocks until completion."""
+ self.RunShellCommand('am start -W '
+ '-a android.intent.action.MAIN -c android.intent.category.HOME')
+
+ def CloseApplication(self, package):
+ """Attempt to close down the application, using increasing violence.
+
+ Args:
+ package: Name of the process to kill off, e.g.
+ com.google.android.apps.chrome
+ """
+ self.RunShellCommand('am force-stop ' + package)
+
+ def ClearApplicationState(self, package):
+ """Closes and clears all state for the given |package|."""
+ self.CloseApplication(package)
+ self.RunShellCommand('rm -r /data/data/%s/app_*' % package)
+ self.RunShellCommand('rm -r /data/data/%s/cache/*' % package)
+ self.RunShellCommand('rm -r /data/data/%s/files/*' % package)
+ self.RunShellCommand('rm -r /data/data/%s/shared_prefs/*' % package)
+
+ def SendKeyEvent(self, keycode):
+ """Sends keycode to the device.
+
+ Args:
+ keycode: Numeric keycode to send (see "enum" at top of file).
+ """
+ self.RunShellCommand('input keyevent %d' % keycode)
+
+ def PushIfNeeded(self, local_path, device_path):
+ """Pushes |local_path| to |device_path|.
+
+ Works for files and directories. Copying is skipped if |device_path|
+ already holds content with the same hash as |local_path|.
+
+ All pushed files can be removed by calling RemovePushedFiles().
+ """
+ assert os.path.exists(local_path), 'Local path not found %s' % local_path
+
+ if not self._md5sum_path:
+ default_build_type = os.environ.get('BUILD_TYPE', 'Debug')
+ md5sum_path = '%s/out/%s/md5sum_bin' % (CHROME_SRC, default_build_type)
+ if not os.path.exists(md5sum_path):
+ md5sum_path = '%s/out/Release/md5sum_bin' % (CHROME_SRC)
+ if not os.path.exists(md5sum_path):
+ print >> sys.stderr, 'Please build md5sum.'
+ sys.exit(1)
+ command = 'push %s %s' % (md5sum_path, MD5SUM_DEVICE_PATH)
+ assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
+ self._md5sum_path = md5sum_path
+
+ self._pushed_files.append(device_path)
+ hashes_on_device = _ComputeFileListHash(
+ self.RunShellCommand(MD5SUM_DEVICE_PATH + ' ' + device_path))
+ assert os.path.exists(local_path), 'Local path not found %s' % local_path
+ hashes_on_host = _ComputeFileListHash(
+ subprocess.Popen(
+ '%s_host %s' % (self._md5sum_path, local_path),
+ stdout=subprocess.PIPE, shell=True).stdout)
+ if hashes_on_device == hashes_on_host:
+ return
+
+ # They don't match, so remove everything first and then create it.
+ if os.path.isdir(local_path):
+ self.RunShellCommand('rm -r %s' % device_path, timeout_time=2*60)
+ self.RunShellCommand('mkdir -p %s' % device_path)
+
+ # NOTE: We can't use adb_interface.Push() because it hardcodes a timeout of
+ # 60 seconds which isn't sufficient for a lot of users of this method.
+ push_command = 'push %s %s' % (local_path, device_path)
+ logging.info('>>> $' + push_command)
+ output = self._adb.SendCommand(push_command, timeout_time=30*60)
+ assert _HasAdbPushSucceeded(output)
+
+
+ def GetFileContents(self, filename, log_result=False):
+ """Gets contents from the file specified by |filename|."""
+ return self.RunShellCommand('if [ -f "' + filename + '" ]; then cat "' +
+ filename + '"; fi', log_result=log_result)
+
+ def SetFileContents(self, filename, contents):
+ """Writes |contents| to the file specified by |filename|."""
+ with tempfile.NamedTemporaryFile() as f:
+ f.write(contents)
+ f.flush()
+ self._adb.Push(f.name, filename)
+
+ def RemovePushedFiles(self):
+ """Removes all files pushed with PushIfNeeded() from the device."""
+ for p in self._pushed_files:
+ self.RunShellCommand('rm -r %s' % p, timeout_time=2*60)
+
+ def ListPathContents(self, path):
+ """Lists files in all subdirectories of |path|.
+
+ Args:
+ path: The path to list.
+
+ Returns:
+ A dict of {"name": (size, lastmod), ...}.
+ """
+ # Example output:
+ # /foo/bar:
+ # -rw-r----- 1 user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
+ re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
+ '(?P<user>[^\s]+)\s+'
+ '(?P<group>[^\s]+)\s+'
+ '(?P<size>[^\s]+)\s+'
+ '(?P<date>[^\s]+)\s+'
+ '(?P<time>[^\s]+)\s+'
+ '(?P<filename>[^\s]+)$')
+ return _GetFilesFromRecursiveLsOutput(
+ path, self.RunShellCommand('ls -lR %s' % path), re_file,
+ self._device_utc_offset)
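+
+ # Usage sketch (illustrative):
+ #   for name, (size, lastmod) in adb.ListPathContents('/sdcard').iteritems():
+ #     print name, size, lastmod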
+
+
+ def SetJavaAssertsEnabled(self, enable):
+ """Sets or removes the device java assertions property.
+
+ Args:
+ enable: If True the property will be set.
+
+ Returns:
+ True if the file was modified (reboot is required for it to take effect).
+ """
+ # First ensure the desired property is persisted.
+ temp_props_file = tempfile.NamedTemporaryFile()
+ properties = ''
+ if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
+ properties = file(temp_props_file.name).read()
+ re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
+ r'\s*=\s*all\s*$', re.MULTILINE)
+ if enable != bool(re.search(re_search, properties)):
+ re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
+ r'\s*=\s*\w+\s*$', re.MULTILINE)
+ properties = re.sub(re_replace, '', properties)
+ if enable:
+ properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
+
+ file(temp_props_file.name, 'w').write(properties)
+ self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
+
+ # Next, check the current runtime value is what we need, and
+ # if not, set it and report that a reboot is required.
+ was_set = 'all' in self.RunShellCommand('getprop ' + JAVA_ASSERT_PROPERTY)
+ if was_set == enable:
+ return False
+
+ self.RunShellCommand('setprop %s "%s"' % (JAVA_ASSERT_PROPERTY,
+ enable and 'all' or ''))
+ return True
+
+ def GetBuildId(self):
+ """Returns the build ID of the system (e.g. JRM79C)."""
+ build_id = self.RunShellCommand('getprop ro.build.id')[0]
+ assert build_id
+ return build_id
+
+ def GetBuildType(self):
+ """Returns the build type of the system (e.g. eng)."""
+ build_type = self.RunShellCommand('getprop ro.build.type')[0]
+ assert build_type
+ return build_type
+
+ def StartMonitoringLogcat(self, clear=True, timeout=10, logfile=None,
+ filters=None):
+ """Starts monitoring the output of logcat, for use with WaitForLogMatch.
+
+ Args:
+ clear: If True the existing logcat output will be cleared, to avoid
+ matching historical output lurking in the log.
+ timeout: How long WaitForLogMatch will wait for the given match.
+ filters: A list of logcat filters to be used.
+ """
+ if clear:
+ self.RunShellCommand('logcat -c')
+ args = []
+ if self._adb._target_arg:
+ args += shlex.split(self._adb._target_arg)
+ args += ['logcat', '-v', 'threadtime']
+ if filters:
+ args.extend(filters)
+ else:
+ args.append('*:v')
+
+ if logfile:
+ logfile = NewLineNormalizer(logfile)
+
+ # Spawn logcat and synchronize with it.
+ for _ in range(4):
+ self._logcat = pexpect.spawn('adb', args, timeout=timeout,
+ logfile=logfile)
+ self.RunShellCommand('log startup_sync')
+ if self._logcat.expect(['startup_sync', pexpect.EOF,
+ pexpect.TIMEOUT]) == 0:
+ break
+ self._logcat.close(force=True)
+ else:
+ logging.critical('Error reading from logcat: ' + str(self._logcat.match))
+ sys.exit(1)
+
+ def GetMonitoredLogCat(self):
+ """Returns an "adb logcat" command as created by pexpected.spawn."""
+ if not self._logcat:
+ self.StartMonitoringLogcat(clear=False)
+ return self._logcat
+
+ def WaitForLogMatch(self, success_re, error_re, clear=False):
+ """Blocks until a matching line is logged or a timeout occurs.
+
+ Args:
+ success_re: A compiled re to search each line for.
+ error_re: A compiled re which, if found, terminates the search for
+ |success_re|. If None is given, no error condition will be detected.
+ clear: If True the existing logcat output will be cleared, defaults to
+ false.
+
+ Raises:
+ pexpect.TIMEOUT upon the timeout specified by StartMonitoringLogcat().
+
+ Returns:
+ The re match object if |success_re| is matched first or None if |error_re|
+ is matched first.
+ """
+ logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
+ t0 = time.time()
+ while True:
+ if not self._logcat:
+ self.StartMonitoringLogcat(clear)
+ try:
+ while True:
+ # Note this will block for up to the timeout _per log line_, so we need
+ # to calculate the overall timeout remaining since t0.
+ time_remaining = t0 + self._logcat.timeout - time.time()
+ if time_remaining < 0: raise pexpect.TIMEOUT(self._logcat)
+ self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
+ line = self._logcat.match.group(1)
+ if error_re:
+ error_match = error_re.search(line)
+ if error_match:
+ return None
+ success_match = success_re.search(line)
+ if success_match:
+ return success_match
+ logging.info('<<< Skipped Logcat Line:' + str(line))
+ except pexpect.TIMEOUT:
+ raise pexpect.TIMEOUT(
+ 'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
+ 'to debug)' %
+ (self._logcat.timeout, success_re.pattern))
+ except pexpect.EOF:
+ # It seems that sometimes logcat can end unexpectedly. This seems
+ # to happen during Chrome startup after a reboot followed by a cache
+ # clean. I don't understand why this happens, but this code deals with
+ # getting EOF in logcat.
+ logging.critical('Found EOF in adb logcat. Restarting...')
+ # Rerun spawn with original arguments. Note that self._logcat.args[0] is
+ # the path of adb, so we don't want it in the arguments.
+ self._logcat = pexpect.spawn('adb',
+ self._logcat.args[1:],
+ timeout=self._logcat.timeout,
+ logfile=self._logcat.logfile)
+
+ def StartRecordingLogcat(self, clear=True, filters=['*:v']):
+ """Starts recording logcat output to eventually be saved as a string.
+
+ This call should come before some series of tests are run, with either
+ StopRecordingLogcat or SearchLogcatRecord following the tests.
+
+ Args:
+ clear: True if existing log output should be cleared.
+ filters: A list of logcat filters to be used.
+ """
+ if clear:
+ self._adb.SendCommand('logcat -c')
+ logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
+ ' '.join(filters))
+ self.logcat_process = subprocess.Popen(logcat_command, shell=True,
+ stdout=subprocess.PIPE)
+
+ def StopRecordingLogcat(self):
+ """Stops an existing logcat recording subprocess and returns output.
+
+ Returns:
+ The logcat output as a string or an empty string if logcat was not
+ being recorded at the time.
+ """
+ if not self.logcat_process:
+ return ''
+ # poll() can legitimately return 0, so compare against None explicitly.
+ # Read self.logcat_process.stdout before killing the process; otherwise
+ # communicate() may return incomplete output due to a broken pipe.
+ if self.logcat_process.poll() is None:
+ self.logcat_process.kill()
+ (output, _) = self.logcat_process.communicate()
+ self.logcat_process = None
+ return output
+
+ def SearchLogcatRecord(self, record, message, thread_id=None, proc_id=None,
+ log_level=None, component=None):
+ """Searches the specified logcat output and returns results.
+
+ This method searches through the logcat output specified by record for a
+ certain message, narrowing results by matching them against any other
+ specified criteria. It returns all matching lines as described below.
+
+ Args:
+ record: A string generated by Start/StopRecordingLogcat to search.
+ message: An output string to search for.
+ thread_id: The thread id that is the origin of the message.
+ proc_id: The process that is the origin of the message.
+ log_level: The log level of the message.
+ component: The name of the component that would create the message.
+
+ Returns:
+ A list of dictionaries representing matching entries, each containing keys
+ thread_id, proc_id, log_level, component, and message.
+ """
+ if thread_id:
+ thread_id = str(thread_id)
+ if proc_id:
+ proc_id = str(proc_id)
+ results = []
+ reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
+ re.MULTILINE)
+ log_list = reg.findall(record)
+ for (tid, pid, log_lev, comp, msg) in log_list:
+ if ((not thread_id or thread_id == tid) and
+ (not proc_id or proc_id == pid) and
+ (not log_level or log_level == log_lev) and
+ (not component or component == comp) and msg.find(message) > -1):
+ match = dict({'thread_id': tid, 'proc_id': pid,
+ 'log_level': log_lev, 'component': comp,
+ 'message': msg})
+ results.append(match)
+ return results
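+
+ # Typical flow (a sketch): record logcat around a test, then search it.
+ #   adb.StartRecordingLogcat()
+ #   ... run the test ...
+ #   record = adb.StopRecordingLogcat()
+ #   errors = adb.SearchLogcatRecord(record, 'FATAL', log_level='E')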
+
+ def ExtractPid(self, process_name):
+ """Extracts Process Ids for a given process name from Android Shell.
+
+ Args:
+ process_name: name of the process on the device.
+
+ Returns:
+ List of all the process ids (as strings) that match the given name.
+ If the name of a process exactly matches the given name, the pid of
+ that process will be inserted at the front of the pid list.
+ """
+ pids = []
+ for line in self.RunShellCommand('ps', log_result=False):
+ data = line.split()
+ try:
+ if process_name in data[-1]: # name is in the last column
+ if process_name == data[-1]:
+ pids.insert(0, data[1]) # PID is in the second column
+ else:
+ pids.append(data[1])
+ except IndexError:
+ pass
+ return pids
+
+ def GetIoStats(self):
+ """Gets cumulative disk IO stats since boot (for all processes).
+
+ Returns:
+ Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
+ was an error.
+ """
+ for line in self.GetFileContents('/proc/diskstats', log_result=False):
+ stats = io_stats_parser.ParseIoStatsLine(line)
+ if stats.device == 'mmcblk0':
+ return {
+ 'num_reads': stats.num_reads_issued,
+ 'num_writes': stats.num_writes_completed,
+ 'read_ms': stats.ms_spent_reading,
+ 'write_ms': stats.ms_spent_writing,
+ }
+ logging.warning('Could not find disk IO stats.')
+ return None
+
+ def GetMemoryUsageForPid(self, pid):
+ """Returns the memory usage for given pid.
+
+ Args:
+ pid: The pid number of the specific process running on device.
+
+ Returns:
+ A tuple containing:
+ [0]: Dict of {metric:usage_kb}, for the process which has specified pid.
+ The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
+ Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,
+ KernelPageSize, MMUPageSize, Nvidia (tablet only).
+ [1]: Detailed /proc/[PID]/smaps information.
+ """
+ usage_dict = collections.defaultdict(int)
+ smaps = collections.defaultdict(dict)
+ current_smap = ''
+ for line in self.GetFileContents('/proc/%s/smaps' % pid, log_result=False):
+ items = line.split()
+ # See man 5 proc for more details. The format is:
+ # address perms offset dev inode pathname
+ if len(items) > 5:
+ current_smap = ' '.join(items[5:])
+ elif len(items) > 3:
+ current_smap = ' '.join(items[3:])
+ match = re.match(MEMORY_INFO_RE, line)
+ if match:
+ key = match.group('key')
+ usage_kb = int(match.group('usage_kb'))
+ usage_dict[key] += usage_kb
+ if key not in smaps[current_smap]:
+ smaps[current_smap][key] = 0
+ smaps[current_smap][key] += usage_kb
+ if not usage_dict or not any(usage_dict.values()):
+ # Presumably the process died between ps and calling this method.
+ logging.warning('Could not find memory usage for pid ' + str(pid))
+
+ for line in self.GetFileContents('/d/nvmap/generic-0/clients',
+ log_result=False):
+ match = re.match(NVIDIA_MEMORY_INFO_RE, line)
+ if match and match.group('pid') == pid:
+ usage_bytes = int(match.group('usage_bytes'))
+ usage_dict['Nvidia'] = int(round(usage_bytes / 1000.0)) # kB
+ break
+
+ return (usage_dict, smaps)
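+
+ # Usage sketch (illustrative; the package name is just an example):
+ #   for pid in adb.ExtractPid('com.android.browser'):
+ #     usage, _ = adb.GetMemoryUsageForPid(pid)
+ #     print pid, usage['Pss'], 'kB PSS'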
+
+ def GetMemoryUsageForPackage(self, package):
+ """Returns the memory usage for all processes whose name contains |pacakge|.
+
+ Args:
+ package: A string holding process name to lookup pid list for.
+
+ Returns:
+ A tuple containing:
+ [0]: Dict of {metric:usage_kb}, summed over all pids associated with
+ |name|.
+ The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
+ Shared_Dirty, Private_Clean, Private_Dirty, Referenced, Swap,
+ KernelPageSize, MMUPageSize, Nvidia (tablet only).
+ [1]: a list with detailed /proc/[PID]/smaps information.
+ """
+ usage_dict = collections.defaultdict(int)
+ pid_list = self.ExtractPid(package)
+ smaps = collections.defaultdict(dict)
+
+ for pid in pid_list:
+ usage_dict_per_pid, smaps_per_pid = self.GetMemoryUsageForPid(pid)
+ smaps[pid] = smaps_per_pid
+ for (key, value) in usage_dict_per_pid.items():
+ usage_dict[key] += value
+
+ return usage_dict, smaps
+
+ def ProcessesUsingDevicePort(self, device_port):
+ """Lists processes using the specified device port on loopback interface.
+
+ Args:
+ device_port: Port on device we want to check.
+
+ Returns:
+ A list of (pid, process_name) tuples using the specified port.
+ """
+ tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
+ tcp_address = '0100007F:%04X' % device_port
+ pids = []
+ for single_connect in tcp_results:
+ connect_results = single_connect.split()
+ # Column 1 is the TCP port, and Column 9 is the inode of the socket
+ if connect_results[1] == tcp_address:
+ socket_inode = connect_results[9]
+ socket_name = 'socket:[%s]' % socket_inode
+ lsof_results = self.RunShellCommand('lsof', log_result=False)
+ for single_process in lsof_results:
+ process_results = single_process.split()
+ # Ignore the line if it has less than nine columns in it, which may
+ # be the case when a process stops while lsof is executing.
+ if len(process_results) <= 8:
+ continue
+ # Column 0 is the executable name
+ # Column 1 is the pid
+ # Column 8 is the Inode in use
+ if process_results[8] == socket_name:
+ pids.append((int(process_results[1]), process_results[0]))
+ break
+ logging.info('PidsUsingDevicePort: %s', pids)
+ return pids
+
+ def FileExistsOnDevice(self, file_name):
+ """Checks whether the given file exists on the device.
+
+ Args:
+ file_name: Full path of file to check.
+
+ Returns:
+ True if the file exists, False otherwise.
+ """
+ assert '"' not in file_name, 'file_name cannot contain double quotes'
+ status = self._adb.SendShellCommand(
+ '\'test -e "%s"; echo $?\'' % (file_name))
+ if 'test: not found' not in status:
+ return int(status) == 0
+
+ status = self._adb.SendShellCommand(
+ '\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
+ return int(status) == 0
+
+
+class NewLineNormalizer(object):
+ """A file-like object to normalize EOLs to '\n'.
+
+ Pexpect runs adb within a pseudo-tty device (see
+ http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
+ as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
+ lines, the log ends up having '\r\r\n' at the end of each line. This
+ filter replaces the above with a single '\n' in the data stream.
+ """
+ def __init__(self, output):
+ self._output = output
+
+ def write(self, data):
+ data = data.replace('\r\r\n', '\n')
+ self._output.write(data)
+
+ def flush(self):
+ self._output.flush()
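+
+# StartMonitoringLogcat() wraps its |logfile| argument in this class, so a
+# plain file object is enough (a sketch):
+#   adb.StartMonitoringLogcat(logfile=open('/tmp/device.log', 'w'))
+# The resulting log then uses plain '\n' line endings.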
+
diff --git a/media/webrtc/trunk/build/android/pylib/apk_info.py b/media/webrtc/trunk/build/android/pylib/apk_info.py
new file mode 100644
index 000000000..7e8867570
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/apk_info.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Gathers information about APKs."""
+
+import collections
+import os
+import re
+
+import cmd_helper
+
+
+class ApkInfo(object):
+ """Helper class for inspecting APKs."""
+ _PROGUARD_PATH = os.path.join(os.environ['ANDROID_SDK_ROOT'],
+ 'tools/proguard/bin/proguard.sh')
+ if not os.path.exists(_PROGUARD_PATH):
+ _PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
+ 'external/proguard/bin/proguard.sh')
+ _PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
+ _PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
+ _PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
+ _PROGUARD_ANNOTATION_CONST_RE = re.compile(r'\s*?- Constant element value.*$')
+ _PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')
+ _AAPT_PACKAGE_NAME_RE = re.compile(r'package: .*name=\'(\S*)\'')
+
+ def __init__(self, apk_path, jar_path):
+ if not os.path.exists(apk_path):
+ raise Exception('%s not found, please build it' % apk_path)
+ self._apk_path = apk_path
+ if not os.path.exists(jar_path):
+ raise Exception('%s not found, please build it' % jar_path)
+ self._jar_path = jar_path
+ self._annotation_map = collections.defaultdict(list)
+ self._test_methods = []
+ self._Initialize()
+
+ def _Initialize(self):
+ proguard_output = cmd_helper.GetCmdOutput([self._PROGUARD_PATH,
+ '-injars', self._jar_path,
+ '-dontshrink',
+ '-dontoptimize',
+ '-dontobfuscate',
+ '-dontpreverify',
+ '-dump',
+ ]).split('\n')
+ clazz = None
+ method = None
+ annotation = None
+ has_value = False
+ qualified_method = None
+ for line in proguard_output:
+ m = self._PROGUARD_CLASS_RE.match(line)
+ if m:
+ clazz = m.group(1).replace('/', '.') # Change package delim.
+ annotation = None
+ continue
+ m = self._PROGUARD_METHOD_RE.match(line)
+ if m:
+ method = m.group(1)
+ annotation = None
+ qualified_method = clazz + '#' + method
+ if method.startswith('test') and clazz.endswith('Test'):
+ self._test_methods += [qualified_method]
+ continue
+ m = self._PROGUARD_ANNOTATION_RE.match(line)
+ if m:
+ assert qualified_method
+ annotation = m.group(1).split('/')[-1] # Ignore the annotation package.
+ self._annotation_map[qualified_method].append(annotation)
+ has_value = False
+ continue
+ if annotation:
+ assert qualified_method
+ if not has_value:
+ m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
+ if m:
+ has_value = True
+ else:
+ m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
+ if m:
+ value = m.group(1)
+ self._annotation_map[qualified_method].append(
+ annotation + ':' + value)
+ has_value = False
+
+ def _GetAnnotationMap(self):
+ return self._annotation_map
+
+ def _IsTestMethod(self, test):
+ class_name, method = test.split('#')
+ return class_name.endswith('Test') and method.startswith('test')
+
+ def GetApkPath(self):
+ return self._apk_path
+
+ def GetPackageName(self):
+ """Returns the package name of this APK."""
+ aapt_output = cmd_helper.GetCmdOutput(
+ ['aapt', 'dump', 'badging', self._apk_path]).split('\n')
+ for line in aapt_output:
+ m = self._AAPT_PACKAGE_NAME_RE.match(line)
+ if m:
+ return m.group(1)
+ raise Exception('Failed to determine package name of %s' % self._apk_path)
+
+ def GetTestAnnotations(self, test):
+ """Returns a list of all annotations for the given |test|. May be empty."""
+ if not self._IsTestMethod(test):
+ return []
+ return self._GetAnnotationMap()[test]
+
+ def _AnnotationsMatchFilters(self, annotation_filter_list, annotations):
+ """Checks if annotations match any of the filters."""
+ if not annotation_filter_list:
+ return True
+ for annotation_filter in annotation_filter_list:
+ filters = annotation_filter.split('=')
+ if len(filters) == 2:
+ key = filters[0]
+ value_list = filters[1].split(',')
+ for value in value_list:
+ if key + ':' + value in annotations:
+ return True
+ elif annotation_filter in annotations:
+ return True
+ return False
+
+ def GetAnnotatedTests(self, annotation_filter_list):
+ """Returns a list of all tests that match the given annotation filters."""
+ return [test for test, annotations in self._GetAnnotationMap().iteritems()
+ if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
+ annotation_filter_list, annotations)]
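+
+ # Filter syntax sketch (illustrative): a bare name matches any test carrying
+ # that annotation, while 'key=value1,value2' matches annotation values.
+ #   apk_info.GetAnnotatedTests(['Smoke'])
+ #   apk_info.GetAnnotatedTests(['Feature=Sync,Bookmarks'])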
+
+ def GetTestMethods(self):
+ """Returns a list of all test methods in this apk as Class#testMethod."""
+ return self._test_methods
+
+ @staticmethod
+ def IsPythonDrivenTest(test):
+ return 'pythonDrivenTests' in test
diff --git a/media/webrtc/trunk/build/android/pylib/base_test_runner.py b/media/webrtc/trunk/build/android/pylib/base_test_runner.py
new file mode 100644
index 000000000..619bc6edf
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/base_test_runner.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import contextlib
+import httplib
+import logging
+import os
+import tempfile
+import time
+
+import android_commands
+import constants
+from chrome_test_server_spawner import SpawningServer
+from flag_changer import FlagChanger
+from forwarder import Forwarder
+import lighttpd_server
+import ports
+from valgrind_tools import CreateTool
+
+
+# A file on device to store ports of net test server. The format of the file is
+# test-spawner-server-port:test-server-port
+NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
+
+
+class BaseTestRunner(object):
+ """Base class for running tests on a single device.
+
+ A subclass should implement RunTests() with no parameter, so that calling
+ the Run() method will set up tests, run them and tear them down.
+ """
+
+ def __init__(self, device, tool, shard_index, build_type):
+ """
+ Args:
+ device: Tests will run on the device of this ID.
+ tool: Name of the tool to create via valgrind_tools.CreateTool().
+ shard_index: Index number of the shard on which the test suite will run.
+ build_type: 'Release' or 'Debug'.
+ """
+ self.device = device
+ self.adb = android_commands.AndroidCommands(device=device)
+ self.tool = CreateTool(tool, self.adb)
+ self._http_server = None
+ self._forwarder = None
+ self._forwarder_device_port = 8000
+ self.forwarder_base_url = ('http://localhost:%d' %
+ self._forwarder_device_port)
+ self.flags = FlagChanger(self.adb)
+ self.shard_index = shard_index
+ self.flags.AddFlags(['--disable-fre'])
+ self._spawning_server = None
+ self._spawner_forwarder = None
+ # A port for the test server spawner is allocated in
+ # LaunchChromeTestServerSpawner; ports for the test servers themselves
+ # are allocated when starting them in TestServerThread.
+ self.test_server_spawner_port = 0
+ self.test_server_port = 0
+ self.build_type = build_type
+
+ def _PushTestServerPortInfoToDevice(self):
+ """Pushes the latest port information to device."""
+ self.adb.SetFileContents(self.adb.GetExternalStorage() + '/' +
+ NET_TEST_SERVER_PORT_INFO_FILE,
+ '%d:%d' % (self.test_server_spawner_port,
+ self.test_server_port))
+
+ def Run(self):
+ """Calls subclass functions to set up tests, run them and tear them down.
+
+ Returns:
+ Test results returned from RunTests().
+ """
+ if not self.HasTests():
+ return True
+ self.SetUp()
+ try:
+ return self.RunTests()
+ finally:
+ self.TearDown()
+
+ def SetUp(self):
+ """Called before tests run."""
+ pass
+
+ def HasTests(self):
+ """Whether the test suite has tests to run."""
+ return True
+
+ def RunTests(self):
+ """Runs the tests. Need to be overridden."""
+ raise NotImplementedError
+
+ def TearDown(self):
+ """Called when tests finish running."""
+ self.ShutdownHelperToolsForTestSuite()
+
+ def CopyTestData(self, test_data_paths, dest_dir):
+ """Copies |test_data_paths| list of files/directories to |dest_dir|.
+
+ Args:
+ test_data_paths: A list of files or directories relative to |dest_dir|
+ which should be copied to the device. The paths must exist in
+ |CHROME_DIR|.
+ dest_dir: Absolute path to copy to on the device.
+ """
+ for p in test_data_paths:
+ self.adb.PushIfNeeded(
+ os.path.join(constants.CHROME_DIR, p),
+ os.path.join(dest_dir, p))
+
+ def LaunchTestHttpServer(self, document_root, port=None,
+ extra_config_contents=None):
+ """Launches an HTTP server to serve HTTP tests.
+
+ Args:
+ document_root: Document root of the HTTP server.
+ port: Port on which we want the http server to bind.
+ extra_config_contents: Extra config contents for the HTTP server.
+ """
+ self._http_server = lighttpd_server.LighttpdServer(
+ document_root, port=port, extra_config_contents=extra_config_contents)
+ if self._http_server.StartupHttpServer():
+ logging.info('http server started: http://localhost:%s',
+ self._http_server.port)
+ else:
+ logging.critical('Failed to start http server')
+ self.StartForwarderForHttpServer()
+ return (self._forwarder_device_port, self._http_server.port)
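+
+ # Usage sketch (illustrative): pages under |document_root| become reachable
+ # from the device through the forwarded port:
+ #   device_port, host_port = self.LaunchTestHttpServer('/tmp/www')
+ #   url = 'http://localhost:%d/index.html' % device_port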
+
+ def StartForwarder(self, port_pairs):
+ """Starts TCP traffic forwarding for the given |port_pairs|.
+
+ Args:
+ port_pairs: A list of (device_port, local_port) tuples to forward.
+ """
+ if self._forwarder:
+ self._forwarder.Close()
+ self._forwarder = Forwarder(
+ self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
+
+ def StartForwarderForHttpServer(self):
+ """Starts a forwarder for the HTTP server.
+
+ The forwarder forwards HTTP requests and responses between host and device.
+ """
+ self.StartForwarder([(self._forwarder_device_port, self._http_server.port)])
+
+ def RestartHttpServerForwarderIfNecessary(self):
+ """Restarts the forwarder if it's not open."""
+ # Checks to see if the http server port is being used. If not, starts
+ # the forwarder again.
+ # TODO(dtrainor): This is not always reliable because sometimes the port
+ # will be left open even after the forwarder has been killed.
+ if not ports.IsDevicePortUsed(self.adb,
+ self._forwarder_device_port):
+ self.StartForwarderForHttpServer()
+
+ def ShutdownHelperToolsForTestSuite(self):
+ """Shuts down the server and the forwarder."""
+ # Forwarders should be killed before the actual servers they're forwarding
+ # to as they are clients potentially with open connections and to allow for
+ # proper hand-shake/shutdown.
+ if self._forwarder or self._spawner_forwarder:
+ # Kill all forwarders on the device and then kill the process on the host
+ # (if it exists)
+ self.adb.KillAll('device_forwarder')
+ if self._forwarder:
+ self._forwarder.Close()
+ if self._spawner_forwarder:
+ self._spawner_forwarder.Close()
+ if self._http_server:
+ self._http_server.ShutdownHttpServer()
+ if self._spawning_server:
+ self._spawning_server.Stop()
+ self.flags.Restore()
+
+ def LaunchChromeTestServerSpawner(self):
+ """Launches test server spawner."""
+ server_ready = False
+ error_msgs = []
+ # Try up to 3 times to launch the test spawner server.
+ for i in xrange(0, 3):
+ # Do not allocate a port for the test server here; a different port is
+ # allocated for each individual test in TestServerThread.
+ self.test_server_spawner_port = ports.AllocateTestServerPort()
+ self._spawning_server = SpawningServer(self.test_server_spawner_port,
+ self.adb,
+ self.tool,
+ self.build_type)
+ self._spawning_server.Start()
+ server_ready, error_msg = ports.IsHttpServerConnectable(
+ '127.0.0.1', self.test_server_spawner_port, path='/ping',
+ expected_read='ready')
+ if server_ready:
+ break
+ else:
+ error_msgs.append(error_msg)
+ self._spawning_server.Stop()
+ # Wait for 2 seconds then restart.
+ time.sleep(2)
+ if not server_ready:
+ logging.error(';'.join(error_msgs))
+ raise Exception('Cannot start the test spawner server.')
+ self._PushTestServerPortInfoToDevice()
+ self._spawner_forwarder = Forwarder(
+ self.adb,
+ [(self.test_server_spawner_port, self.test_server_spawner_port)],
+ self.tool, '127.0.0.1', self.build_type)
diff --git a/media/webrtc/trunk/build/android/pylib/base_test_sharder.py b/media/webrtc/trunk/build/android/pylib/base_test_sharder.py
new file mode 100644
index 000000000..48206c202
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/base_test_sharder.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+import multiprocessing
+
+from test_result import TestResults
+
+
+def _ShardedTestRunnable(test):
+ """Standalone function needed by multiprocessing.Pool."""
+ log_format = '[' + test.device + '] # %(asctime)-15s: %(message)s'
+ if logging.getLogger().handlers:
+ logging.getLogger().handlers[0].setFormatter(logging.Formatter(log_format))
+ else:
+ logging.basicConfig(format=log_format)
+ # Catch SystemExit here; otherwise a sys.exit() raised inside a test
+ # would take down this pool worker process.
+ try:
+ return test.Run()
+ except SystemExit:
+ return TestResults()
+
+def SetTestsContainer(tests_container):
+ """Sets tests container.
+
+ multiprocessing.Queue can't be pickled across processes, so we need to set
+ this as a 'global', per process, via multiprocessing.Pool.
+ """
+ BaseTestSharder.tests_container = tests_container
+
+
+class BaseTestSharder(object):
+ """Base class for sharding tests across multiple devices.
+
+ Args:
+ attached_devices: A list of attached devices.
+ """
+ # See more in SetTestsContainer.
+ tests_container = None
+
+ def __init__(self, attached_devices):
+ self.attached_devices = attached_devices
+ self.retries = 1
+ self.tests = []
+
+ def CreateShardedTestRunner(self, device, index):
+ """Factory function to create a suite-specific test runner.
+
+ Args:
+ device: Device serial where this shard will run.
+ index: Index of this device in the pool.
+
+ Returns:
+ An object of BaseTestRunner type (that can provide a "Run()" method).
+ """
+ pass
+
+ def SetupSharding(self, tests):
+ """Called before starting the shards."""
+ pass
+
+ def OnTestsCompleted(self, test_runners, test_results):
+ """Notifies that we completed the tests."""
+ pass
+
+ def RunShardedTests(self):
+ """Runs the tests in all connected devices.
+
+ Returns:
+ A TestResults object.
+ """
+ logging.warning('*' * 80)
+ logging.warning('Sharding across ' + str(len(self.attached_devices)) +
+ ' devices.')
+ logging.warning('Note that the output is not synchronized.')
+ logging.warning('Look for the "Final result" banner in the end.')
+ logging.warning('*' * 80)
+ final_results = TestResults()
+ for retry in xrange(self.retries):
+ logging.warning('Try %d of %d', retry + 1, self.retries)
+ self.SetupSharding(self.tests)
+ test_runners = []
+ for index, device in enumerate(self.attached_devices):
+ logging.warning('*' * 80)
+ logging.warning('Creating shard %d for %s', index, device)
+ logging.warning('*' * 80)
+ test_runner = self.CreateShardedTestRunner(device, index)
+ test_runners += [test_runner]
+ logging.warning('Starting...')
+ pool = multiprocessing.Pool(len(self.attached_devices),
+ SetTestsContainer,
+ [BaseTestSharder.tests_container])
+ # map can't handle KeyboardInterrupt exception. It's a python bug.
+ # So use map_async instead.
+ async_results = pool.map_async(_ShardedTestRunnable, test_runners)
+ results_lists = async_results.get(999999)
+ test_results = TestResults.FromTestResults(results_lists)
+ if retry == self.retries - 1:
+ all_passed = final_results.ok + test_results.ok
+ final_results = test_results
+ final_results.ok = all_passed
+ break
+ else:
+ final_results.ok += test_results.ok
+ self.tests = []
+ for t in test_results.GetAllBroken():
+ self.tests += [t.name]
+ if not self.tests:
+ break
+ self.OnTestsCompleted(test_runners, final_results)
+ return final_results
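+
+# A minimal subclass sketch (illustrative; MyTestRunner is hypothetical):
+#   class MySharder(BaseTestSharder):
+#     def CreateShardedTestRunner(self, device, index):
+#       return MyTestRunner(device, index)
+#   sharder = MySharder(android_commands.GetAttachedDevices())
+#   results = sharder.RunShardedTests()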
diff --git a/media/webrtc/trunk/build/android/pylib/buildbot_report.py b/media/webrtc/trunk/build/android/pylib/buildbot_report.py
new file mode 100644
index 000000000..fe3fcd638
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/buildbot_report.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions to print buildbot messages."""
+
+def PrintLink(label, url):
+ """Adds a link with name |label| linking to |url| to current buildbot step.
+
+ Args:
+ label: A string with the name of the label.
+ url: A string of the URL.
+ """
+ print '@@@STEP_LINK@%s@%s@@@' % (label, url)
+
+
+def PrintMsg(msg):
+ """Appends |msg| to the current buildbot step text.
+
+ Args:
+ msg: String to be appended.
+ """
+ print '@@@STEP_TEXT@%s@@@' % msg
+
+
+def PrintSummaryText(msg):
+ """Appends |msg| to main build summary. Visible from waterfall.
+
+ Args:
+ msg: String to be appended.
+ """
+ print '@@@STEP_SUMMARY_TEXT@%s@@@' % msg
+
+
+def PrintError():
+ """Marks the current step as failed."""
+ print '@@@STEP_FAILURE@@@'
+
+
+def PrintWarning():
+ """Marks the current step with a warning."""
+ print '@@@STEP_WARNINGS@@@'
+
+
+def PrintNamedStep(step):
+  """Starts a new buildbot step named |step|."""
+  print '@@@BUILD_STEP %s@@@' % step
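These annotations are magic strings that the buildbot master parses out of a step's stdout. A sketch of how a step might combine them (all argument values are made-up examples):

    PrintNamedStep('device_status_check')
    PrintLink('logcat', 'http://example.com/logs/logcat.txt')
    PrintSummaryText('2 devices online')
    PrintWarning()  # Mark the step amber rather than red.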
diff --git a/media/webrtc/trunk/build/android/pylib/chrome_test_server_spawner.py b/media/webrtc/trunk/build/android/pylib/chrome_test_server_spawner.py
new file mode 100644
index 000000000..512a6091d
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/chrome_test_server_spawner.py
@@ -0,0 +1,402 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
+
+It's used to accept requests from the device to spawn and kill instances of the
+chrome test server on the host.
+"""
+
+import BaseHTTPServer
+import json
+import logging
+import os
+import select
+import struct
+import subprocess
+import threading
+import time
+import urlparse
+
+import constants
+from forwarder import Forwarder
+import ports
+
+
+# Paths that are needed to import the modules required to run testserver.py.
+os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + ':%s:%s:%s:%s' % (
+ os.path.join(constants.CHROME_DIR, 'third_party'),
+ os.path.join(constants.CHROME_DIR, 'third_party', 'tlslite'),
+ os.path.join(constants.CHROME_DIR, 'third_party', 'pyftpdlib', 'src'),
+ os.path.join(constants.CHROME_DIR, 'net', 'tools', 'testserver'))
+
+
+SERVER_TYPES = {
+ 'http': '',
+ 'ftp': '-f',
+ 'sync': '--sync',
+ 'tcpecho': '--tcp-echo',
+ 'udpecho': '--udp-echo',
+}
+
+
+# The timeout (in seconds) of starting up the Python test server.
+TEST_SERVER_STARTUP_TIMEOUT = 10
+
+
+def _CheckPortStatus(port, expected_status):
+ """Returns True if port has expected_status.
+
+ Args:
+ port: the port number.
+ expected_status: boolean of expected status.
+
+ Returns:
+    True if the port reached the expected status within the timeout;
+    False otherwise.
+ """
+ for timeout in range(1, 5):
+ if ports.IsHostPortUsed(port) == expected_status:
+ return True
+ time.sleep(timeout)
+ return False
+
+
+def _GetServerTypeCommandLine(server_type):
+ """Returns the command-line by the given server type.
+
+ Args:
+ server_type: the server type to be used (e.g. 'http').
+
+ Returns:
+ A string containing the command-line argument.
+ """
+ if server_type not in SERVER_TYPES:
+ raise NotImplementedError('Unknown server type: %s' % server_type)
+ if server_type == 'udpecho':
+ raise Exception('Please do not run UDP echo tests because we do not have '
+ 'a UDP forwarder tool.')
+ return SERVER_TYPES[server_type]
+
+
+class TestServerThread(threading.Thread):
+ """A thread to run the test server in a separate process."""
+
+ def __init__(self, ready_event, arguments, adb, tool, build_type):
+ """Initialize TestServerThread with the following argument.
+
+ Args:
+ ready_event: event which will be set when the test server is ready.
+ arguments: dictionary of arguments to run the test server.
+ adb: instance of AndroidCommands.
+ tool: instance of runtime error detection tool.
+ build_type: 'Release' or 'Debug'.
+ """
+ threading.Thread.__init__(self)
+ self.wait_event = threading.Event()
+ self.stop_flag = False
+ self.ready_event = ready_event
+ self.ready_event.clear()
+ self.arguments = arguments
+ self.adb = adb
+ self.tool = tool
+    # Handle on the testserver.py subprocess; assigned in run() and checked
+    # by Stop(), so initialize it before the thread starts.
+    self.process = None
+ self.is_ready = False
+ self.host_port = self.arguments['port']
+ assert isinstance(self.host_port, int)
+ self._test_server_forwarder = None
+ # The forwarder device port now is dynamically allocated.
+ self.forwarder_device_port = 0
+ # Anonymous pipe in order to get port info from test server.
+ self.pipe_in = None
+ self.pipe_out = None
+ self.command_line = []
+ self.build_type = build_type
+
+ def _WaitToStartAndGetPortFromTestServer(self):
+ """Waits for the Python test server to start and gets the port it is using.
+
+ The port information is passed by the Python test server with a pipe given
+ by self.pipe_out. It is written as a result to |self.host_port|.
+
+ Returns:
+ Whether the port used by the test server was successfully fetched.
+ """
+ assert self.host_port == 0 and self.pipe_out and self.pipe_in
+ (in_fds, _, _) = select.select([self.pipe_in, ], [], [],
+ TEST_SERVER_STARTUP_TIMEOUT)
+ if len(in_fds) == 0:
+      logging.error('Timed out waiting for the Python test server to start.')
+ return False
+ # First read the data length as an unsigned 4-byte value. This
+ # is _not_ using network byte ordering since the Python test server packs
+ # size as native byte order and all Chromium platforms so far are
+ # configured to use little-endian.
+ # TODO(jnd): Change the Python test server and local_test_server_*.cc to
+ # use a unified byte order (either big-endian or little-endian).
+ data_length = os.read(self.pipe_in, struct.calcsize('=L'))
+ if data_length:
+ (data_length,) = struct.unpack('=L', data_length)
+ assert data_length
+ if not data_length:
+ logging.error('Failed to get length of server data.')
+ return False
+ port_json = os.read(self.pipe_in, data_length)
+ if not port_json:
+ logging.error('Failed to get server data.')
+ return False
+ logging.info('Got port json data: %s', port_json)
+ port_json = json.loads(port_json)
+ if port_json.has_key('port') and isinstance(port_json['port'], int):
+ self.host_port = port_json['port']
+ return _CheckPortStatus(self.host_port, True)
+ logging.error('Failed to get port information from the server data.')
+ return False
+
+ def _GenerateCommandLineArguments(self):
+ """Generates the command line to run the test server.
+
+ Note that all options are processed by following the definitions in
+ testserver.py.
+ """
+ if self.command_line:
+ return
+ # The following arguments must exist.
+ type_cmd = _GetServerTypeCommandLine(self.arguments['server-type'])
+ if type_cmd:
+ self.command_line.append(type_cmd)
+ self.command_line.append('--port=%d' % self.host_port)
+ # Use a pipe to get the port given by the instance of Python test server
+ # if the test does not specify the port.
+ if self.host_port == 0:
+ (self.pipe_in, self.pipe_out) = os.pipe()
+ self.command_line.append('--startup-pipe=%d' % self.pipe_out)
+ self.command_line.append('--host=%s' % self.arguments['host'])
+ data_dir = self.arguments['data-dir'] or 'chrome/test/data'
+ if not os.path.isabs(data_dir):
+ data_dir = os.path.join(constants.CHROME_DIR, data_dir)
+ self.command_line.append('--data-dir=%s' % data_dir)
+ # The following arguments are optional depending on the individual test.
+ if self.arguments.has_key('log-to-console'):
+ self.command_line.append('--log-to-console')
+ if self.arguments.has_key('auth-token'):
+ self.command_line.append('--auth-token=%s' % self.arguments['auth-token'])
+ if self.arguments.has_key('https'):
+ self.command_line.append('--https')
+ if self.arguments.has_key('cert-and-key-file'):
+ self.command_line.append('--cert-and-key-file=%s' % os.path.join(
+ constants.CHROME_DIR, self.arguments['cert-and-key-file']))
+ if self.arguments.has_key('ocsp'):
+ self.command_line.append('--ocsp=%s' % self.arguments['ocsp'])
+ if self.arguments.has_key('https-record-resume'):
+ self.command_line.append('--https-record-resume')
+ if self.arguments.has_key('ssl-client-auth'):
+ self.command_line.append('--ssl-client-auth')
+ if self.arguments.has_key('tls-intolerant'):
+ self.command_line.append('--tls-intolerant=%s' %
+ self.arguments['tls-intolerant'])
+ if self.arguments.has_key('ssl-client-ca'):
+ for ca in self.arguments['ssl-client-ca']:
+ self.command_line.append('--ssl-client-ca=%s' %
+ os.path.join(constants.CHROME_DIR, ca))
+ if self.arguments.has_key('ssl-bulk-cipher'):
+ for bulk_cipher in self.arguments['ssl-bulk-cipher']:
+ self.command_line.append('--ssl-bulk-cipher=%s' % bulk_cipher)
+
+ def run(self):
+ logging.info('Start running the thread!')
+ self.wait_event.clear()
+ self._GenerateCommandLineArguments()
+ command = [os.path.join(constants.CHROME_DIR, 'net', 'tools',
+ 'testserver', 'testserver.py')] + self.command_line
+ logging.info('Running: %s', command)
+ self.process = subprocess.Popen(command)
+ if self.process:
+ if self.pipe_out:
+ self.is_ready = self._WaitToStartAndGetPortFromTestServer()
+ else:
+ self.is_ready = _CheckPortStatus(self.host_port, True)
+ if self.is_ready:
+ self._test_server_forwarder = Forwarder(
+ self.adb, [(0, self.host_port)], self.tool, '127.0.0.1',
+ self.build_type)
+ # Check whether the forwarder is ready on the device.
+ self.is_ready = False
+ device_port = self._test_server_forwarder.DevicePortForHostPort(
+ self.host_port)
+ if device_port:
+ for timeout in range(1, 5):
+ if ports.IsDevicePortUsed(self.adb, device_port, 'LISTEN'):
+ self.is_ready = True
+ self.forwarder_device_port = device_port
+ break
+ time.sleep(timeout)
+ # Wake up the request handler thread.
+ self.ready_event.set()
+ # Keep thread running until Stop() gets called.
+ while not self.stop_flag:
+ time.sleep(1)
+ if self.process.poll() is None:
+ self.process.kill()
+ if self._test_server_forwarder:
+ self._test_server_forwarder.Close()
+ self.process = None
+ self.is_ready = False
+ if self.pipe_out:
+ os.close(self.pipe_in)
+ os.close(self.pipe_out)
+ self.pipe_in = None
+ self.pipe_out = None
+ logging.info('Test-server has died.')
+ self.wait_event.set()
+
+ def Stop(self):
+ """Blocks until the loop has finished.
+
+ Note that this must be called in another thread.
+ """
+ if not self.process:
+ return
+ self.stop_flag = True
+ self.wait_event.wait()
+
+
+class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """A handler used to process http GET/POST request."""
+
+ def _SendResponse(self, response_code, response_reason, additional_headers,
+ contents):
+ """Generates a response sent to the client from the provided parameters.
+
+ Args:
+ response_code: number of the response status.
+ response_reason: string of reason description of the response.
+ additional_headers: dict of additional headers. Each key is the name of
+ the header, each value is the content of the header.
+ contents: string of the contents we want to send to client.
+ """
+ self.send_response(response_code, response_reason)
+ self.send_header('Content-Type', 'text/html')
+    # Specify the content-length; without it the http(s) response will not
+    # complete properly (the browser keeps expecting data).
+ self.send_header('Content-Length', len(contents))
+ for header_name in additional_headers:
+ self.send_header(header_name, additional_headers[header_name])
+ self.end_headers()
+ self.wfile.write(contents)
+ self.wfile.flush()
+
+ def _StartTestServer(self):
+ """Starts the test server thread."""
+ logging.info('Handling request to spawn a test server.')
+ content_type = self.headers.getheader('content-type')
+ if content_type != 'application/json':
+ raise Exception('Bad content-type for start request.')
+ content_length = self.headers.getheader('content-length')
+ if not content_length:
+ content_length = 0
+ try:
+ content_length = int(content_length)
+    except ValueError:
+ raise Exception('Bad content-length for start request.')
+ logging.info(content_length)
+ test_server_argument_json = self.rfile.read(content_length)
+ logging.info(test_server_argument_json)
+ assert not self.server.test_server_instance
+ ready_event = threading.Event()
+ self.server.test_server_instance = TestServerThread(
+ ready_event,
+ json.loads(test_server_argument_json),
+ self.server.adb,
+ self.server.tool,
+ self.server.build_type)
+ self.server.test_server_instance.setDaemon(True)
+ self.server.test_server_instance.start()
+ ready_event.wait()
+ if self.server.test_server_instance.is_ready:
+ self._SendResponse(200, 'OK', {}, json.dumps(
+ {'port': self.server.test_server_instance.forwarder_device_port,
+ 'message': 'started'}))
+ logging.info('Test server is running on port: %d.',
+ self.server.test_server_instance.host_port)
+ else:
+ self.server.test_server_instance.Stop()
+ self.server.test_server_instance = None
+ self._SendResponse(500, 'Test Server Error.', {}, '')
+      logging.info('Encountered a problem while starting the test server.')
+
+ def _KillTestServer(self):
+ """Stops the test server instance."""
+ # There should only ever be one test server at a time. This may do the
+    # wrong thing if we try to start multiple test servers.
+ if not self.server.test_server_instance:
+ return
+ port = self.server.test_server_instance.host_port
+ logging.info('Handling request to kill a test server on port: %d.', port)
+ self.server.test_server_instance.Stop()
+ # Make sure the status of test server is correct before sending response.
+ if _CheckPortStatus(port, False):
+ self._SendResponse(200, 'OK', {}, 'killed')
+ logging.info('Test server on port %d is killed', port)
+ else:
+ self._SendResponse(500, 'Test Server Error.', {}, '')
+      logging.info('Encountered a problem while killing the test server.')
+ self.server.test_server_instance = None
+
+ def do_POST(self):
+ parsed_path = urlparse.urlparse(self.path)
+ action = parsed_path.path
+ logging.info('Action for POST method is: %s.', action)
+ if action == '/start':
+ self._StartTestServer()
+ else:
+ self._SendResponse(400, 'Unknown request.', {}, '')
+      logging.info('Encountered unknown request: %s.', action)
+
+ def do_GET(self):
+ parsed_path = urlparse.urlparse(self.path)
+ action = parsed_path.path
+ params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
+ logging.info('Action for GET method is: %s.', action)
+ for param in params:
+ logging.info('%s=%s', param, params[param][0])
+ if action == '/kill':
+ self._KillTestServer()
+ elif action == '/ping':
+ # The ping handler is used to check whether the spawner server is ready
+ # to serve the requests. We don't need to test the status of the test
+ # server when handling ping request.
+ self._SendResponse(200, 'OK', {}, 'ready')
+ logging.info('Handled ping request and sent response.')
+ else:
+ self._SendResponse(400, 'Unknown request', {}, '')
+      logging.info('Encountered unknown request: %s.', action)
+
+
+class SpawningServer(object):
+ """The class used to start/stop a http server."""
+
+ def __init__(self, test_server_spawner_port, adb, tool, build_type):
+ logging.info('Creating new spawner on port: %d.', test_server_spawner_port)
+ self.server = BaseHTTPServer.HTTPServer(('', test_server_spawner_port),
+ SpawningServerRequestHandler)
+ self.port = test_server_spawner_port
+ self.server.adb = adb
+ self.server.tool = tool
+ self.server.test_server_instance = None
+ self.server.build_type = build_type
+
+ def _Listen(self):
+ logging.info('Starting test server spawner')
+ self.server.serve_forever()
+
+ def Start(self):
+ listener_thread = threading.Thread(target=self._Listen)
+ listener_thread.setDaemon(True)
+ listener_thread.start()
+ time.sleep(1)
+
+ def Stop(self):
+ if self.server.test_server_instance:
+ self.server.test_server_instance.Stop()
+ self.server.shutdown()
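Putting the protocol together: a client talks to the spawner over plain HTTP, POSTing JSON to /start and GETting /kill. A minimal sketch using Python 2's httplib, where 8001 is a hypothetical spawner port and the argument keys mirror those read by TestServerThread above:

    import httplib
    import json

    conn = httplib.HTTPConnection('127.0.0.1', 8001)
    body = json.dumps({'server-type': 'http', 'port': 0, 'host': '127.0.0.1',
                       'data-dir': 'chrome/test/data'})
    conn.request('POST', '/start', body,
                 {'Content-Type': 'application/json'})
    # e.g. {u'port': 10000, u'message': u'started'}
    print json.loads(conn.getresponse().read())
    conn.request('GET', '/kill')
    print conn.getresponse().read()  # 'killed'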
diff --git a/media/webrtc/trunk/build/android/pylib/cmd_helper.py b/media/webrtc/trunk/build/android/pylib/cmd_helper.py
new file mode 100644
index 000000000..8b5013090
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/cmd_helper.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A wrapper for subprocess to make calling shell commands easier."""
+
+
+import logging
+import subprocess
+
+
+def RunCmd(args, cwd=None):
+ """Opens a subprocess to execute a program and returns its return value.
+
+ Args:
+ args: A string or a sequence of program arguments. The program to execute is
+ the string or the first item in the args sequence.
+ cwd: If not None, the subprocess's current directory will be changed to
+ |cwd| before it's executed.
+
+ Returns:
+ Return code from the command execution.
+ """
+ logging.info(str(args) + ' ' + (cwd or ''))
+ p = subprocess.Popen(args=args, cwd=cwd)
+ return p.wait()
+
+
+def GetCmdOutput(args, cwd=None, shell=False):
+ """Open a subprocess to execute a program and returns its output.
+
+ Args:
+ args: A string or a sequence of program arguments. The program to execute is
+ the string or the first item in the args sequence.
+ cwd: If not None, the subprocess's current directory will be changed to
+ |cwd| before it's executed.
+ shell: Whether to execute args as a shell command.
+
+ Returns:
+ Captures and returns the command's stdout.
+ Prints the command's stderr to logger (which defaults to stdout).
+ """
+ logging.info(str(args) + ' ' + (cwd or ''))
+ p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, shell=shell)
+ stdout, stderr = p.communicate()
+ if stderr:
+ logging.critical(stderr)
+  logging.info(stdout[:4096])  # Log at most the first 4K of the output.
+ return stdout
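For example, assuming the adb binary is on the PATH:

    import logging

    # RunCmd returns the exit code; GetCmdOutput returns captured stdout.
    if RunCmd(['adb', 'start-server']) != 0:
      logging.error('Failed to start the adb server.')
    device_listing = GetCmdOutput(['adb', 'devices'])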
diff --git a/media/webrtc/trunk/build/android/pylib/constants.py b/media/webrtc/trunk/build/android/pylib/constants.py
new file mode 100644
index 000000000..b340612e2
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/constants.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Defines a set of constants shared by test runners and other scripts."""
+
+import os
+
+
+CHROME_PACKAGE = 'com.google.android.apps.chrome'
+CHROME_ACTIVITY = 'com.google.android.apps.chrome.Main'
+CHROME_TESTS_PACKAGE = 'com.google.android.apps.chrome.tests'
+LEGACY_BROWSER_PACKAGE = 'com.google.android.browser'
+LEGACY_BROWSER_ACTIVITY = 'com.android.browser.BrowserActivity'
+CONTENT_SHELL_PACKAGE = "org.chromium.content_shell"
+CONTENT_SHELL_ACTIVITY = "org.chromium.content_shell.ContentShellActivity"
+CHROME_SHELL_PACKAGE = 'org.chromium.chrome.browser.test'
+CHROMIUM_TEST_SHELL_PACKAGE = 'org.chromium.chrome.testshell'
+
+CHROME_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '..', '..', '..'))
+
+# Ports arrangement for various test servers used in Chrome for Android.
+# The lighttpd server will attempt to use 9000 as the default port; if that is
+# unavailable, it will find a free port in the range 8001-8999.
+LIGHTTPD_DEFAULT_PORT = 9000
+LIGHTTPD_RANDOM_PORT_FIRST = 8001
+LIGHTTPD_RANDOM_PORT_LAST = 8999
+TEST_SYNC_SERVER_PORT = 9031
+
+# Net test server ports start at 10000. Reserving 20000 ports for all
+# test-server-based tests should be enough to allocate a distinct port for
+# each individual test.
+TEST_SERVER_PORT_FIRST = 10000
+TEST_SERVER_PORT_LAST = 30000
+# A file to record next valid port of test server.
+TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
+TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
+
+TEST_EXECUTABLE_DIR = '/data/local/tmp'
+# Directories for common java libraries for SDK build.
+# These constants are defined in build/android/ant/common.xml
+SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
+SDK_BUILD_APKS_DIR = 'apks'
+
+# The directory on the device where perf test output gets saved to.
+DEVICE_PERF_OUTPUT_DIR = '/data/data/' + CHROME_PACKAGE + '/files'
diff --git a/media/webrtc/trunk/build/android/pylib/debug_info.py b/media/webrtc/trunk/build/android/pylib/debug_info.py
new file mode 100644
index 000000000..6f0f55a33
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/debug_info.py
@@ -0,0 +1,196 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Collect debug info for a test."""
+
+import datetime
+import logging
+import os
+import re
+import shutil
+import string
+import subprocess
+import tempfile
+
+import cmd_helper
+
+
+TOMBSTONE_DIR = '/data/tombstones/'
+
+
+class GTestDebugInfo(object):
+ """A helper class to collect related debug information for a gtest.
+
+ Debug info is collected in two steps:
+ - first, object(s) of this class (one per device), accumulate logs
+ and screenshots in tempdir.
+ - once the test has finished, call ZipAndCleanResults to create
+ a zip containing the logs from all devices, and clean them up.
+
+ Args:
+ adb: ADB interface the tests are using.
+ device: Serial# of the Android device in which the specified gtest runs.
+ testsuite_name: Name of the specified gtest.
+ gtest_filter: Test filter used by the specified gtest.
+ """
+
+ def __init__(self, adb, device, testsuite_name, gtest_filter):
+ """Initializes the DebugInfo class for a specified gtest."""
+ self.adb = adb
+ self.device = device
+ self.testsuite_name = testsuite_name
+ self.gtest_filter = gtest_filter
+ self.logcat_process = None
+ self.has_storage = False
+ self.log_dir = os.path.join(tempfile.gettempdir(),
+ 'gtest_debug_info',
+ self.testsuite_name,
+ self.device)
+ if not os.path.exists(self.log_dir):
+ os.makedirs(self.log_dir)
+ self.log_file_name = os.path.join(self.log_dir,
+ self._GeneratePrefixName() + '_log.txt')
+ self.old_crash_files = self._ListCrashFiles()
+
+ def _GetSignatureFromGTestFilter(self):
+ """Gets a signature from gtest_filter.
+
+ Signature is used to identify the tests from which we collect debug
+ information.
+
+ Returns:
+ A signature string. Returns 'all' if there is no gtest filter.
+ """
+ if not self.gtest_filter:
+ return 'all'
+ filename_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
+ signature = ''.join(c for c in self.gtest_filter if c in filename_chars)
+ if len(signature) > 64:
+ # The signature can't be too long, as it'll be part of a file name.
+ signature = signature[:64]
+ return signature
+
+ def _GeneratePrefixName(self):
+ """Generates a prefix name for debug information of the test.
+
+ The prefix name consists of the following:
+ (1) root name of test_suite_base.
+ (2) device serial number.
+    (3) prefix of the filter signature generated from gtest_filter.
+ (4) date & time when calling this method.
+
+ Returns:
+ Name of the log file.
+ """
+ return (os.path.splitext(self.testsuite_name)[0] + '_' + self.device + '_' +
+ self._GetSignatureFromGTestFilter() + '_' +
+ datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f'))
+
+ def StartRecordingLog(self, clear=True, filters=['*:v']):
+ """Starts recording logcat output to a file.
+
+    This call should come before running the test, with a matching
+    StopRecordingLog call after the test completes.
+
+ Args:
+ clear: True if existing log output should be cleared.
+ filters: A list of logcat filters to be used.
+ """
+ self.StopRecordingLog()
+ if clear:
+ cmd_helper.RunCmd(['adb', '-s', self.device, 'logcat', '-c'])
+ logging.info('Start dumping log to %s ...', self.log_file_name)
+ command = 'adb -s %s logcat -v threadtime %s > %s' % (self.device,
+ ' '.join(filters),
+ self.log_file_name)
+ self.logcat_process = subprocess.Popen(command, shell=True)
+
+ def StopRecordingLog(self):
+ """Stops an existing logcat recording subprocess."""
+ if not self.logcat_process:
+ return
+    # poll() may return 0, so compare against None explicitly.
+ if self.logcat_process.poll() is None:
+ self.logcat_process.kill()
+ self.logcat_process = None
+    logging.info('Finished log dump.')
+
+ def TakeScreenshot(self, identifier_mark):
+ """Takes a screen shot from current specified device.
+
+ Args:
+      identifier_mark: A string identifying the screenshot; it becomes part of
+                       the screenshot's filename. An empty string is
+                       acceptable.
+ Returns:
+ Returns the file name on the host of the screenshot if successful,
+ None otherwise.
+ """
+ assert isinstance(identifier_mark, str)
+ screenshot_path = os.path.join(os.getenv('ANDROID_HOST_OUT', ''),
+ 'bin',
+ 'screenshot2')
+ if not os.path.exists(screenshot_path):
+ logging.error('Failed to take screen shot from device %s', self.device)
+ return None
+ shot_path = os.path.join(self.log_dir, ''.join([self._GeneratePrefixName(),
+ identifier_mark,
+ '_screenshot.png']))
+ re_success = re.compile(re.escape('Success.'), re.MULTILINE)
+ if re_success.findall(cmd_helper.GetCmdOutput([screenshot_path, '-s',
+ self.device, shot_path])):
+ logging.info('Successfully took a screen shot to %s', shot_path)
+ return shot_path
+ logging.error('Failed to take screen shot from device %s', self.device)
+ return None
+
+ def _ListCrashFiles(self):
+ """Collects crash files from current specified device.
+
+ Returns:
+ A dict of crash files in format {"name": (size, lastmod), ...}.
+ """
+ return self.adb.ListPathContents(TOMBSTONE_DIR)
+
+ def ArchiveNewCrashFiles(self):
+ """Archives the crash files newly generated until calling this method."""
+ current_crash_files = self._ListCrashFiles()
+ files = []
+ for f in current_crash_files:
+ if f not in self.old_crash_files:
+ files += [f]
+ elif current_crash_files[f] != self.old_crash_files[f]:
+        # The tombstones dir holds at most 10 files, so when a file name
+        # already existed we compare its size and timestamp to detect reuse.
+ files += [f]
+ if files:
+      logging.info('New crash file(s): %s', ' '.join(files))
+ for f in files:
+ self.adb.Adb().Pull(TOMBSTONE_DIR + f,
+ os.path.join(self.log_dir, f))
+
+ @staticmethod
+ def ZipAndCleanResults(dest_dir, dump_file_name):
+ """A helper method to zip all debug information results into a dump file.
+
+ Args:
+      dest_dir: Directory path where the dump file is placed.
+      dump_file_name: Desired name of the dump file; this method makes sure
+                      it gets a '.zip' extension.
+ """
+ if not dest_dir or not dump_file_name:
+ return
+ cmd_helper.RunCmd(['mkdir', '-p', dest_dir])
+ log_basename = os.path.basename(dump_file_name)
+ log_zip_file = os.path.join(dest_dir,
+ os.path.splitext(log_basename)[0] + '.zip')
+ logging.info('Zipping debug dumps into %s ...', log_zip_file)
+ # Add new dumps into the zip file. The zip may exist already if previous
+ # gtest also dumps the debug information. It's OK since we clean up the old
+ # dumps in each build step.
+ log_src_dir = os.path.join(tempfile.gettempdir(), 'gtest_debug_info')
+ cmd_helper.RunCmd(['zip', '-q', '-r', log_zip_file, log_src_dir])
+ assert os.path.exists(log_zip_file)
+ assert os.path.exists(log_src_dir)
+ shutil.rmtree(log_src_dir)
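A sketch of the intended lifecycle, where adb is an AndroidCommands instance and the serial number, suite, and paths are example values:

    info = GTestDebugInfo(adb, 'emulator-5554', 'base_unittests', 'FooTest.*')
    info.StartRecordingLog()
    # ... run the gtest ...
    info.StopRecordingLog()
    info.TakeScreenshot('_after_run')
    info.ArchiveNewCrashFiles()
    GTestDebugInfo.ZipAndCleanResults('/tmp/debug_dumps', 'base_unittests')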
diff --git a/media/webrtc/trunk/build/android/pylib/device_stats_monitor.html b/media/webrtc/trunk/build/android/pylib/device_stats_monitor.html
new file mode 100644
index 000000000..b3abbb0bd
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/device_stats_monitor.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<!--
+ * Copyright (c) 2012 The Chromium Authors. All rights reserved. Use of this
+ * source code is governed by a BSD-style license that can be found in the
+ * LICENSE file.
+-->
+<html>
+<head>
+ <title>Device Stats Monitor</title>
+ <script type="text/javascript" src="http://www.google.com/jsapi"></script>
+ <style>
+ body {
+ font-family: sans-serif
+ }
+ </style>
+</head>
+<body>
+<h2>Device Stats Monitor</h2>
+<ul>
+<li>Pass path to trace data via the <code>results</code> querystring param.
+<li>Combine charts with the <code>combine</code> querystring param (e.g. <code>&combine=sectors_read,sectors_written</code>).
+<li>Use <code>stacked=true</code> to stack combined charts instead of overlaying (default).
+</ul>
+</body>
+<script>
+google.load("visualization", "1", {packages:["corechart"]});
+
+/**
+ * @returns The querystring param value for |name| or an empty string.
+ */
+function getQuerystringParam(name) {
+ name = name.replace(/[\[]/, "\\\[").replace(/[\]]/, "\\\]");
+ var regexS = "[\\?&]" + name + "=([^&#]*)";
+ var regex = new RegExp(regexS);
+ var results = regex.exec(window.location.search);
+ if (results == null)
+ return "";
+ else
+ return decodeURIComponent(results[1].replace(/\+/g, " "));
+}
+
+/**
+ * @returns An array of the keys in |obj|, sorted alphabetically.
+ */
+function sortedKeys(obj) {
+ var keys = [];
+ for (var key in obj) {
+ keys.push(key);
+ }
+ keys.sort();
+ return keys;
+}
+
+/**
+ * Removes all of the given argument values from the array, in place.
+ */
+Array.prototype.remove = function() {
+ var what, a = arguments, l = a.length, ax;
+ while (l && this.length) {
+ what = a[--l];
+ while ((ax = this.indexOf(what)) != -1) {
+ this.splice(ax, 1);
+ }
+ }
+ return this;
+}
+
+/**
+ * Displays a new chart.
+ *
+ * @param {Number} hz Number of samples per second in the data.
+ * @param {String} name Name to display on top of chart.
+ * @param {Number[][]} values Array of value arrays to display.
+ * @param {String} units Label for the vertical axis.
+ * @param {Boolean} stacked Whether to display values as stacked.
+ */
+function displayChart(hz, name, values, units, stacked) {
+ var data = new google.visualization.DataTable();
+ data.addColumn('number', 'ms');
+ var names = name.split(',');
+ for (var i = 0; i < names.length; i++) {
+ data.addColumn('number', names[i]);
+ }
+
+ var rows = [];
+ var interval = 1000.0 / hz;
+ for (var i = 0; i < values[0].length; i++) {
+ var row = [i*interval];
+ for (var j = 0; j < values.length; j++) {
+ row.push(values[j][i]);
+ }
+ rows.push(row);
+ }
+ data.addRows(rows);
+
+ var options = {
+ hAxis: {title: 'ms (' + hz + 'hz)'},
+ isStacked: stacked,
+ legend: {position: 'top'},
+ vAxis: {title: units},
+ };
+
+ var elem = document.createElement('DIV');
+ elem.style = 'width:100%;height:500px';
+ document.body.appendChild(elem);
+ var chart = new google.visualization.AreaChart(elem);
+ chart.draw(data, options);
+}
+
+/**
+ * Displays all charts.
+ *
+ * Invoked by the results script. JSONP is used to avoid security
+ * restrictions on XHRs for file:// URLs.
+ */
+function display(hz, results, units) {
+ var combine = getQuerystringParam('combine');
+ var keys = sortedKeys(results);
+ for (var i = 0; i < keys.length; i++) {
+ var key = keys[i];
+ var name = key;
+ var values = [results[key]];
+ var unit = units[key];
+ if (combine.indexOf(key) >= 0) {
+ i--;
+ name = combine;
+ values = [];
+ var combined_keys = combine.split(',');
+ for (var j = 0; j < combined_keys.length; j++) {
+ values.push(results[combined_keys[j]]);
+ keys.remove(combined_keys[j]);
+ }
+ }
+ displayChart(hz, name, values, unit, !!getQuerystringParam('stacked'));
+ }
+}
+
+var resultsPath = getQuerystringParam('results');
+if (resultsPath)
+ document.write("<script src='" + resultsPath + "'></"+"script>");
+else
+ document.write("Please specify results querystring param.");
+</script>
+</html>
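As a usage sketch, a results file written by device_stats_monitor.py (next file) can be loaded with a URL of this form, with hypothetical paths:

    file:///path/to/device_stats_monitor.html?results=file:///tmp/stats.js&combine=sectors_read,sectors_written&stacked=true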
diff --git a/media/webrtc/trunk/build/android/pylib/device_stats_monitor.py b/media/webrtc/trunk/build/android/pylib/device_stats_monitor.py
new file mode 100644
index 000000000..8be4efae5
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/device_stats_monitor.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for iotop/top style profiling for android."""
+
+import collections
+import json
+import os
+import subprocess
+import sys
+import urllib
+
+import constants
+import io_stats_parser
+
+
+class DeviceStatsMonitor(object):
+ """Class for collecting device stats such as IO/CPU usage.
+
+ Args:
+    adb: Instance of AndroidCommands.
+ hz: Frequency at which to sample device stats.
+ """
+
+ DEVICE_PATH = constants.TEST_EXECUTABLE_DIR + '/device_stats_monitor'
+ PROFILE_PATH = (constants.DEVICE_PERF_OUTPUT_DIR +
+ '/device_stats_monitor.profile')
+ RESULT_VIEWER_PATH = os.path.abspath(os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), 'device_stats_monitor.html'))
+
+ def __init__(self, adb, hz, build_type):
+ self._adb = adb
+ host_path = os.path.abspath(os.path.join(
+ constants.CHROME_DIR, 'out', build_type, 'device_stats_monitor'))
+ self._adb.PushIfNeeded(host_path, DeviceStatsMonitor.DEVICE_PATH)
+ self._hz = hz
+
+ def Start(self):
+ """Starts device stats monitor on the device."""
+ self._adb.SetFileContents(DeviceStatsMonitor.PROFILE_PATH, '')
+ self._process = subprocess.Popen(
+ ['adb', 'shell', '%s --hz=%d %s' % (
+ DeviceStatsMonitor.DEVICE_PATH, self._hz,
+ DeviceStatsMonitor.PROFILE_PATH)])
+
+ def StopAndCollect(self, output_path):
+ """Stops monitoring and saves results.
+
+ Args:
+ output_path: Path to save results.
+
+ Returns:
+ String of URL to load results in browser.
+ """
+ assert self._process
+ self._adb.KillAll(DeviceStatsMonitor.DEVICE_PATH)
+ self._process.wait()
+ profile = self._adb.GetFileContents(DeviceStatsMonitor.PROFILE_PATH)
+
+ results = collections.defaultdict(list)
+ last_io_stats = None
+ last_cpu_stats = None
+ for line in profile:
+ if ' mmcblk0 ' in line:
+ stats = io_stats_parser.ParseIoStatsLine(line)
+ if last_io_stats:
+ results['sectors_read'].append(stats.num_sectors_read -
+ last_io_stats.num_sectors_read)
+ results['sectors_written'].append(stats.num_sectors_written -
+ last_io_stats.num_sectors_written)
+ last_io_stats = stats
+ elif line.startswith('cpu '):
+ stats = self._ParseCpuStatsLine(line)
+ if last_cpu_stats:
+ results['user'].append(stats.user - last_cpu_stats.user)
+ results['nice'].append(stats.nice - last_cpu_stats.nice)
+ results['system'].append(stats.system - last_cpu_stats.system)
+ results['idle'].append(stats.idle - last_cpu_stats.idle)
+ results['iowait'].append(stats.iowait - last_cpu_stats.iowait)
+ results['irq'].append(stats.irq - last_cpu_stats.irq)
+          results['softirq'].append(stats.softirq - last_cpu_stats.softirq)
+ last_cpu_stats = stats
+ units = {
+ 'sectors_read': 'sectors',
+ 'sectors_written': 'sectors',
+ 'user': 'jiffies',
+ 'nice': 'jiffies',
+ 'system': 'jiffies',
+ 'idle': 'jiffies',
+ 'iowait': 'jiffies',
+ 'irq': 'jiffies',
+ 'softirq': 'jiffies',
+ }
+ with open(output_path, 'w') as f:
+ f.write('display(%d, %s, %s);' % (self._hz, json.dumps(results), units))
+ return 'file://%s?results=file://%s' % (
+ DeviceStatsMonitor.RESULT_VIEWER_PATH, urllib.quote(output_path))
+
+
+ @staticmethod
+ def _ParseCpuStatsLine(line):
+ """Parses a line of cpu stats into a CpuStats named tuple."""
+ # Field definitions: http://www.linuxhowtos.org/System/procstat.htm
+ cpu_stats = collections.namedtuple('CpuStats',
+ ['device',
+ 'user',
+ 'nice',
+ 'system',
+ 'idle',
+ 'iowait',
+ 'irq',
+ 'softirq',
+ ])
+ fields = line.split()
+ return cpu_stats._make([fields[0]] + [int(f) for f in fields[1:8]])
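A usage sketch, where adb is an AndroidCommands instance and 60 Hz plus the output path are example values:

    monitor = DeviceStatsMonitor(adb, 60, 'Release')
    monitor.Start()
    # ... run the test being profiled ...
    url = monitor.StopAndCollect('/tmp/device_stats_results.js')
    print 'Load %s in a browser to view the charts.' % url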
diff --git a/media/webrtc/trunk/build/android/pylib/fake_dns.py b/media/webrtc/trunk/build/android/pylib/fake_dns.py
new file mode 100644
index 000000000..1c6449004
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/fake_dns.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import android_commands
+import constants
+import logging
+import os
+import subprocess
+import time
+
+
+class FakeDns(object):
+ """Wrapper class for the fake_dns tool."""
+ _FAKE_DNS_PATH = constants.TEST_EXECUTABLE_DIR + '/fake_dns'
+
+ def __init__(self, adb, build_type):
+ """
+ Args:
+ adb: the AndroidCommands to use.
+ build_type: 'Release' or 'Debug'.
+ """
+ self._adb = adb
+ self._build_type = build_type
+ self._fake_dns = None
+ self._original_dns = None
+
+ def _PushAndStartFakeDns(self):
+ """Starts the fake_dns server that replies all name queries 127.0.0.1.
+
+ Returns:
+ subprocess instance connected to the fake_dns process on the device.
+ """
+ self._adb.PushIfNeeded(
+ os.path.join(constants.CHROME_DIR, 'out', self._build_type, 'fake_dns'),
+ FakeDns._FAKE_DNS_PATH)
+ return subprocess.Popen(
+ ['adb', '-s', self._adb._adb.GetSerialNumber(),
+ 'shell', '%s -D' % FakeDns._FAKE_DNS_PATH])
+
+ def SetUp(self):
+ """Configures the system to point to a DNS server that replies 127.0.0.1.
+
+ This can be used in combination with the forwarder to forward all web
+ traffic to a replay server.
+
+ The TearDown() method will perform all cleanup.
+ """
+ self._adb.RunShellCommand('ip route add 8.8.8.0/24 via 127.0.0.1 dev lo')
+ self._fake_dns = self._PushAndStartFakeDns()
+ self._original_dns = self._adb.RunShellCommand('getprop net.dns1')[0]
+ self._adb.RunShellCommand('setprop net.dns1 127.0.0.1')
+ time.sleep(2) # Time for server to start and the setprop to take effect.
+
+ def TearDown(self):
+ """Shuts down the fake_dns."""
+ if self._fake_dns:
+ if not self._original_dns or self._original_dns == '127.0.0.1':
+ logging.warning('Bad original DNS, falling back to Google DNS.')
+ self._original_dns = '8.8.8.8'
+ self._adb.RunShellCommand('setprop net.dns1 %s' % self._original_dns)
+ self._fake_dns.kill()
+ self._adb.RunShellCommand('ip route del 8.8.8.0/24 via 127.0.0.1 dev lo')
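A usage sketch that wraps a test run so the DNS settings are always restored (adb is an AndroidCommands instance):

    fake_dns = FakeDns(adb, 'Release')
    fake_dns.SetUp()
    try:
      pass  # ... run tests whose DNS lookups should resolve to 127.0.0.1 ...
    finally:
      fake_dns.TearDown()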
diff --git a/media/webrtc/trunk/build/android/pylib/flag_changer.py b/media/webrtc/trunk/build/android/pylib/flag_changer.py
new file mode 100644
index 000000000..621056156
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/flag_changer.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import constants
+import traceback
+import warnings
+
+
+# Location where Chrome reads its command-line flags from.
+CHROME_COMMAND_FILE = constants.TEST_EXECUTABLE_DIR + '/chrome-command-line'
+
+class FlagChanger(object):
+ """Changes the flags Chrome runs with.
+
+  There are two different use cases for this class:
+ * Flags are permanently set by calling Set().
+ * Flags can be temporarily set for a particular set of unit tests. These
+ tests should call Restore() to revert the flags to their original state
+ once the tests have completed.
+ """
+
+ def __init__(self, android_cmd):
+ self._android_cmd = android_cmd
+
+ # Save the original flags.
+ self._orig_line = self._android_cmd.GetFileContents(CHROME_COMMAND_FILE)
+ if self._orig_line:
+ self._orig_line = self._orig_line[0].strip()
+
+ # Parse out the flags into a list to facilitate adding and removing flags.
+ self._current_flags = self._TokenizeFlags(self._orig_line)
+
+ def Get(self):
+ """Returns list of current flags."""
+ return self._current_flags
+
+ def Set(self, flags):
+ """Replaces all flags on the current command line with the flags given.
+
+ Args:
+ flags: A list of flags to set, eg. ['--single-process'].
+ """
+ if flags:
+ assert flags[0] != 'chrome'
+
+ self._current_flags = flags
+ self._UpdateCommandLineFile()
+
+ def AddFlags(self, flags):
+ """Appends flags to the command line if they aren't already there.
+
+ Args:
+ flags: A list of flags to add on, eg. ['--single-process'].
+ """
+ if flags:
+ assert flags[0] != 'chrome'
+
+ # Avoid appending flags that are already present.
+ for flag in flags:
+ if flag not in self._current_flags:
+ self._current_flags.append(flag)
+ self._UpdateCommandLineFile()
+
+ def RemoveFlags(self, flags):
+ """Removes flags from the command line, if they exist.
+
+ Args:
+ flags: A list of flags to remove, eg. ['--single-process']. Note that we
+ expect a complete match when removing flags; if you want to remove
+ a switch with a value, you must use the exact string used to add
+ it in the first place.
+ """
+ if flags:
+ assert flags[0] != 'chrome'
+
+ for flag in flags:
+ if flag in self._current_flags:
+ self._current_flags.remove(flag)
+ self._UpdateCommandLineFile()
+
+ def Restore(self):
+ """Restores the flags to their original state."""
+ self._current_flags = self._TokenizeFlags(self._orig_line)
+ self._UpdateCommandLineFile()
+
+ def _UpdateCommandLineFile(self):
+ """Writes out the command line to the file, or removes it if empty."""
+ print "Current flags: ", self._current_flags
+
+ if self._current_flags:
+ self._android_cmd.SetFileContents(CHROME_COMMAND_FILE,
+ 'chrome ' +
+ ' '.join(self._current_flags))
+ else:
+ self._android_cmd.RunShellCommand('rm ' + CHROME_COMMAND_FILE)
+
+ def _TokenizeFlags(self, line):
+ """Changes the string containing the command line into a list of flags.
+
+ Follows similar logic to CommandLine.java::tokenizeQuotedArguments:
+ * Flags are split using whitespace, unless the whitespace is within a
+ pair of quotation marks.
+ * Unlike the Java version, we keep the quotation marks around switch
+ values since we need them to re-create the file when new flags are
+ appended.
+
+ Args:
+ line: A string containing the entire command line. The first token is
+ assumed to be the program name.
+ """
+ if not line:
+ return []
+
+ tokenized_flags = []
+ current_flag = ""
+ within_quotations = False
+
+ # Move through the string character by character and build up each flag
+ # along the way.
+ for c in line.strip():
+      if c == '"':
+ if len(current_flag) > 0 and current_flag[-1] == '\\':
+ # Last char was a backslash; pop it, and treat this " as a literal.
+ current_flag = current_flag[0:-1] + '"'
+ else:
+ within_quotations = not within_quotations
+ current_flag += c
+      elif not within_quotations and (c == ' ' or c == '\t'):
+        if current_flag != "":
+ tokenized_flags.append(current_flag)
+ current_flag = ""
+ else:
+ current_flag += c
+
+    # Tack on the last flag, warning if a quoted string was left open.
+    if current_flag:
+      if within_quotations:
+        warnings.warn("Unterminated quoted string: " + current_flag)
+      tokenized_flags.append(current_flag)
+
+ # Return everything but the program name.
+ return tokenized_flags[1:]
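A usage sketch for the temporary-flags case, where android_cmd is an AndroidCommands instance and the flags are example values:

    changer = FlagChanger(android_cmd)
    changer.AddFlags(['--disable-fre', '--enable-test-intents'])
    # ... run the tests ...
    changer.Restore()  # Put the original command line back.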
diff --git a/media/webrtc/trunk/build/android/pylib/forwarder.py b/media/webrtc/trunk/build/android/pylib/forwarder.py
new file mode 100644
index 000000000..bc41db3ff
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/forwarder.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import os
+import re
+import sys
+import time
+
+import android_commands
+import cmd_helper
+import constants
+import ports
+
+from pylib import pexpect
+
+class Forwarder(object):
+ """Class to manage port forwards from the device to the host."""
+
+ _DEVICE_FORWARDER_PATH = constants.TEST_EXECUTABLE_DIR + '/device_forwarder'
+
+ # Unix Abstract socket path:
+ _DEVICE_ADB_CONTROL_PORT = 'chrome_device_forwarder'
+ _TIMEOUT_SECS = 30
+
+ def __init__(self, adb, port_pairs, tool, host_name, build_type):
+ """Forwards TCP ports on the device back to the host.
+
+ Works like adb forward, but in reverse.
+
+ Args:
+ adb: Instance of AndroidCommands for talking to the device.
+ port_pairs: A list of tuples (device_port, host_port) to forward. Note
+ that you can specify 0 as a device_port, in which case a
+ port will by dynamically assigned on the device. You can
+ get the number of the assigned port using the
+ DevicePortForHostPort method.
+ tool: Tool class to use to get wrapper, if necessary, for executing the
+ forwarder (see valgrind_tools.py).
+ host_name: Address to forward to, must be addressable from the
+ host machine. Usually use loopback '127.0.0.1'.
+ build_type: 'Release' or 'Debug'.
+
+ Raises:
+ Exception on failure to forward the port.
+ """
+ self._adb = adb
+ self._host_to_device_port_map = dict()
+ self._host_process = None
+ self._device_process = None
+ self._adb_forward_process = None
+
+ self._host_adb_control_port = ports.AllocateTestServerPort()
+ if not self._host_adb_control_port:
+ raise Exception('Failed to allocate a TCP port in the host machine.')
+ adb.PushIfNeeded(
+ os.path.join(constants.CHROME_DIR, 'out', build_type,
+ 'device_forwarder'),
+ Forwarder._DEVICE_FORWARDER_PATH)
+ self._host_forwarder_path = os.path.join(constants.CHROME_DIR,
+ 'out',
+ build_type,
+ 'host_forwarder')
+ forward_string = ['%d:%d:%s' %
+ (device, host, host_name) for device, host in port_pairs]
+ logging.info('Forwarding ports: %s', forward_string)
+ timeout_sec = 5
+ host_pattern = 'host_forwarder.*' + ' '.join(forward_string)
+ # TODO(felipeg): Rather than using a blocking kill() here, the device
+ # forwarder could try to bind the Unix Domain Socket until it succeeds or
+ # while it fails because the socket is already bound (with appropriate
+ # timeout handling obviously).
+ self._KillHostForwarderBlocking(host_pattern, timeout_sec)
+ self._KillDeviceForwarderBlocking(timeout_sec)
+ self._adb_forward_process = pexpect.spawn(
+ 'adb', ['-s',
+ adb._adb.GetSerialNumber(),
+ 'forward',
+ 'tcp:%s' % self._host_adb_control_port,
+ 'localabstract:%s' % Forwarder._DEVICE_ADB_CONTROL_PORT])
+ self._device_process = pexpect.spawn(
+ 'adb', ['-s',
+ adb._adb.GetSerialNumber(),
+ 'shell',
+ '%s %s -D --adb_sock=%s' % (
+ tool.GetUtilWrapper(),
+ Forwarder._DEVICE_FORWARDER_PATH,
+ Forwarder._DEVICE_ADB_CONTROL_PORT)])
+
+ device_success_re = re.compile('Starting Device Forwarder.')
+ device_failure_re = re.compile('.*:ERROR:(.*)')
+ index = self._device_process.expect([device_success_re,
+ device_failure_re,
+ pexpect.EOF,
+ pexpect.TIMEOUT],
+ Forwarder._TIMEOUT_SECS)
+ if index == 1:
+ # Failure
+ error_msg = str(self._device_process.match.group(1))
+ logging.error(self._device_process.before)
+ self._CloseProcess()
+ raise Exception('Failed to start Device Forwarder with Error: %s' %
+ error_msg)
+ elif index == 2:
+ logging.error(self._device_process.before)
+ self._CloseProcess()
+ raise Exception('Unexpected EOF while trying to start Device Forwarder.')
+ elif index == 3:
+ logging.error(self._device_process.before)
+ self._CloseProcess()
+      raise Exception('Timed out while trying to start Device Forwarder.')
+
+ self._host_process = pexpect.spawn(self._host_forwarder_path,
+ ['--adb_port=%s' % (
+ self._host_adb_control_port)] +
+ forward_string)
+
+    # Read the output of the command to determine which device ports were
+    # forwarded to which host ports (necessary when a device port of 0 was
+    # requested and dynamically assigned).
+ host_success_re = re.compile('Forwarding device port (\d+) to host (\d+):')
+ host_failure_re = re.compile('Couldn\'t start forwarder server for port '
+ 'spec: (\d+):(\d+)')
+ for pair in port_pairs:
+ index = self._host_process.expect([host_success_re,
+ host_failure_re,
+ pexpect.EOF,
+ pexpect.TIMEOUT],
+ Forwarder._TIMEOUT_SECS)
+ if index == 0:
+ # Success
+ device_port = int(self._host_process.match.group(1))
+ host_port = int(self._host_process.match.group(2))
+ self._host_to_device_port_map[host_port] = device_port
+ logging.info("Forwarding device port: %d to host port: %d." %
+ (device_port, host_port))
+ elif index == 1:
+ # Failure
+ device_port = int(self._host_process.match.group(1))
+ host_port = int(self._host_process.match.group(2))
+ self._CloseProcess()
+ raise Exception('Failed to forward port %d to %d' % (device_port,
+ host_port))
+ elif index == 2:
+ logging.error(self._host_process.before)
+ self._CloseProcess()
+ raise Exception('Unexpected EOF while trying to forward ports %s' %
+ port_pairs)
+ elif index == 3:
+ logging.error(self._host_process.before)
+ self._CloseProcess()
+ raise Exception('Timeout while trying to forward ports %s' % port_pairs)
+
+ def _KillHostForwarderBlocking(self, host_pattern, timeout_sec):
+ """Kills any existing host forwarders using the provided pattern.
+
+ Note that this waits until the process terminates.
+ """
+ cmd_helper.RunCmd(['pkill', '-f', host_pattern])
+ elapsed = 0
+ wait_period = 0.1
+ while not cmd_helper.RunCmd(['pgrep', '-f', host_pattern]) and (
+ elapsed < timeout_sec):
+ time.sleep(wait_period)
+ elapsed += wait_period
+ if elapsed >= timeout_sec:
+ raise Exception('Timed out while killing ' + host_pattern)
+
+ def _KillDeviceForwarderBlocking(self, timeout_sec):
+ """Kills any existing device forwarders.
+
+ Note that this waits until the process terminates.
+ """
+ processes_killed = self._adb.KillAllBlocking(
+ 'device_forwarder', timeout_sec)
+ if not processes_killed:
+ pids = self._adb.ExtractPid('device_forwarder')
+ if pids:
+ raise Exception('Timed out while killing device_forwarder')
+
+ def _CloseProcess(self):
+ if self._host_process:
+ self._host_process.close()
+ if self._device_process:
+ self._device_process.close()
+ if self._adb_forward_process:
+ self._adb_forward_process.close()
+ self._host_process = None
+ self._device_process = None
+ self._adb_forward_process = None
+
+ def DevicePortForHostPort(self, host_port):
+ """Get the device port that corresponds to a given host port."""
+ return self._host_to_device_port_map.get(host_port)
+
+ def Close(self):
+ """Terminate the forwarder process."""
+ self._CloseProcess()
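A usage sketch, where adb and tool are existing AndroidCommands and valgrind_tools instances and 8000 is an example host port:

    # device_port=0 lets the device pick a free port, forwarded to host
    # port 8000.
    forwarder = Forwarder(adb, [(0, 8000)], tool, '127.0.0.1', 'Release')
    device_port = forwarder.DevicePortForHostPort(8000)
    # ... code on the device connects to 127.0.0.1:device_port ...
    forwarder.Close()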
diff --git a/media/webrtc/trunk/build/android/pylib/io_stats_parser.py b/media/webrtc/trunk/build/android/pylib/io_stats_parser.py
new file mode 100644
index 000000000..89097abbc
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/io_stats_parser.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Provides an interface to communicate with the device via the adb command.
+
+Assumes adb binary is currently on system path.
+"""
+
+
+import collections
+
+
+def ParseIoStatsLine(line):
+ """Parses a line of io stats into a IoStats named tuple."""
+ # Field definitions: http://www.kernel.org/doc/Documentation/iostats.txt
+ IoStats = collections.namedtuple('IoStats',
+ ['device',
+ 'num_reads_issued',
+ 'num_reads_merged',
+ 'num_sectors_read',
+ 'ms_spent_reading',
+ 'num_writes_completed',
+ 'num_writes_merged',
+ 'num_sectors_written',
+ 'ms_spent_writing',
+ 'num_ios_in_progress',
+ 'ms_spent_doing_io',
+ 'ms_spent_doing_io_weighted',
+ ])
+ fields = line.split()
+ return IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
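For example, given a made-up /proc/diskstats line:

    line = ' 179 0 mmcblk0 10 2 336 40 5 6 112 9 0 11 12'
    stats = ParseIoStatsLine(line)
    print stats.device            # 'mmcblk0'
    print stats.num_sectors_read  # 336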
diff --git a/media/webrtc/trunk/build/android/pylib/java_unittest_utils.py b/media/webrtc/trunk/build/android/pylib/java_unittest_utils.py
new file mode 100644
index 000000000..b5446dcf9
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/java_unittest_utils.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This file is imported by python tests ran by run_python_tests.py."""
+
+import os
+
+import android_commands
+from run_java_tests import TestRunner
+
+
+def _GetPackageName(fname):
+ """Extracts the package name from the test file path."""
+ base_root = os.path.join('com', 'google', 'android')
+ dirname = os.path.dirname(fname)
+ package = dirname[dirname.rfind(base_root):]
+ return package.replace(os.sep, '.')
+
+
+def RunJavaTest(fname, suite, test, ports_to_forward):
+ device = android_commands.GetAttachedDevices()[0]
+ package_name = _GetPackageName(fname)
+ test = package_name + '.' + suite + '#' + test
+ java_test_runner = TestRunner(False, device, [test], False, False, False,
+ False, 0, ports_to_forward)
+ return java_test_runner.Run()
diff --git a/media/webrtc/trunk/build/android/pylib/json_perf_parser.py b/media/webrtc/trunk/build/android/pylib/json_perf_parser.py
new file mode 100644
index 000000000..1a8e61753
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/json_perf_parser.py
@@ -0,0 +1,160 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""A helper module for parsing JSON objects from perf tests results."""
+
+import json
+
+
+def GetAverageRunInfo(json_data, name):
+ """Summarizes TraceEvent JSON data for performance metrics.
+
+ Example JSON Inputs (More tags can be added but these are required):
+ Measuring Duration:
+ [
+ { "cat": "Java",
+ "ts": 10000000000,
+ "ph": "S",
+ "name": "TestTrace"
+ },
+ { "cat": "Java",
+ "ts": 10000004000,
+ "ph": "F",
+ "name": "TestTrace"
+ },
+ ...
+ ]
+
+ Measuring Call Frequency (FPS):
+ [
+ { "cat": "Java",
+ "ts": 10000000000,
+ "ph": "I",
+ "name": "TestTraceFPS"
+ },
+ { "cat": "Java",
+ "ts": 10000004000,
+ "ph": "I",
+ "name": "TestTraceFPS"
+ },
+ ...
+ ]
+
+ Args:
+    json_data: A list of dictionaries, each representing a JSON object.
+ name: The 'name' tag to filter on in the JSON file.
+
+ Returns:
+ A dictionary of result data with the following tags:
+ min: The minimum value tracked.
+ max: The maximum value tracked.
+ average: The average of all the values tracked.
+ count: The number of times the category/name pair was tracked.
+      type: The type of tracking ('Instant' for instant tags and 'Span' for
+          begin/end tags).
+ category: The passed in category filter.
+ name: The passed in name filter.
+ data_points: A list of all of the times used to generate this data.
+ units: The units for the values being reported.
+
+ Raises:
+ Exception: if entry contains invalid data.
+ """
+
+ def EntryFilter(entry):
+ return entry['cat'] == 'Java' and entry['name'] == name
+ filtered_entries = filter(EntryFilter, json_data)
+
+ result = {}
+
+ result['min'] = -1
+ result['max'] = -1
+ result['average'] = 0
+ result['count'] = 0
+ result['type'] = 'Unknown'
+ result['category'] = 'Java'
+ result['name'] = name
+ result['data_points'] = []
+ result['units'] = ''
+
+ total_sum = 0
+
+ last_val = 0
+ val_type = None
+ for entry in filtered_entries:
+ if not val_type:
+ if 'mem' in entry:
+ val_type = 'mem'
+
+ def GetVal(entry):
+ return entry['mem']
+
+ result['units'] = 'kb'
+ elif 'ts' in entry:
+ val_type = 'ts'
+
+ def GetVal(entry):
+ return float(entry['ts']) / 1000.0
+
+ result['units'] = 'ms'
+ else:
+ raise Exception('Entry did not contain valid value info: %s' % entry)
+
+    if val_type not in entry:
+ raise Exception('Entry did not contain expected value type "%s" '
+ 'information: %s' % (val_type, entry))
+ val = GetVal(entry)
+ if (entry['ph'] == 'S' and
+ (result['type'] == 'Unknown' or result['type'] == 'Span')):
+ result['type'] = 'Span'
+ last_val = val
+ elif ((entry['ph'] == 'F' and result['type'] == 'Span') or
+ (entry['ph'] == 'I' and (result['type'] == 'Unknown' or
+ result['type'] == 'Instant'))):
+ if last_val > 0:
+ delta = val - last_val
+ if result['min'] == -1 or result['min'] > delta:
+ result['min'] = delta
+ if result['max'] == -1 or result['max'] < delta:
+ result['max'] = delta
+ total_sum += delta
+ result['count'] += 1
+ result['data_points'].append(delta)
+ if entry['ph'] == 'I':
+ result['type'] = 'Instant'
+ last_val = val
+ if result['count'] > 0: result['average'] = total_sum / result['count']
+
+ return result
+
+
+def GetAverageRunInfoFromJSONString(json_string, name):
+ """Returns the results from GetAverageRunInfo using a JSON string.
+
+ Args:
+ json_string: The string containing JSON.
+ name: The 'name' tag to filter on in the JSON file.
+
+ Returns:
+ See GetAverageRunInfo Returns section.
+ """
+ return GetAverageRunInfo(json.loads(json_string), name)
+
+
+def GetAverageRunInfoFromFile(json_file, name):
+ """Returns the results from GetAverageRunInfo using a JSON file.
+
+ Args:
+ json_file: The path to a JSON file.
+ name: The 'name' tag to filter on in the JSON file.
+
+ Returns:
+ See GetAverageRunInfo Returns section.
+ """
+ with open(json_file, 'r') as f:
+ data = f.read()
+ perf = json.loads(data)
+
+ return GetAverageRunInfo(perf, name)
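For example, feeding the 'Measuring Duration' sample from the docstring above through the parser:

    events = [
        {'cat': 'Java', 'ts': 10000000000, 'ph': 'S', 'name': 'TestTrace'},
        {'cat': 'Java', 'ts': 10000004000, 'ph': 'F', 'name': 'TestTrace'},
    ]
    info = GetAverageRunInfo(events, 'TestTrace')
    print info['type'], info['count']     # Span 1
    print info['average'], info['units']  # 4.0 ms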
diff --git a/media/webrtc/trunk/build/android/pylib/perf_tests_helper.py b/media/webrtc/trunk/build/android/pylib/perf_tests_helper.py
new file mode 100644
index 000000000..c0a3ee413
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/perf_tests_helper.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+import android_commands
+import math
+
+# Valid values of result type.
+RESULT_TYPES = {'unimportant': 'RESULT ',
+ 'default': '*RESULT ',
+ 'informational': ''}
+
+
+def _EscapePerfResult(s):
+ """Escapes |s| for use in a perf result."""
+ # Colons (:) and equal signs (=) are not allowed, and we chose an arbitrary
+ # limit of 40 chars.
+ return re.sub(':|=', '_', s[:40])
+
+
+def PrintPerfResult(measurement, trace, values, units, result_type='default',
+ print_to_stdout=True):
+ """Prints numerical data to stdout in the format required by perf tests.
+
+ The string args may be empty but they must not contain any colons (:) or
+ equals signs (=).
+
+ Args:
+ measurement: A description of the quantity being measured, e.g. "vm_peak".
+ trace: A description of the particular data point, e.g. "reference".
+ values: A list of numeric measured values.
+ units: A description of the units of measure, e.g. "bytes".
+ result_type: A tri-state that accepts values of ['unimportant', 'default',
+ 'informational']. 'unimportant' prints RESULT, 'default' prints *RESULT
+ and 'informational' prints nothing.
+ print_to_stdout: If True, prints the output in stdout instead of returning
+ the output to caller.
+
+ Returns:
+    String of the formatted perf result.
+ """
+ assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type
+
+ assert isinstance(values, list)
+ assert len(values)
+ assert '/' not in measurement
+ avg = None
+ sd = None
+ if len(values) > 1:
+ try:
+ value = '[%s]' % ','.join([str(v) for v in values])
+ avg = sum([float(v) for v in values]) / len(values)
+ sqdiffs = [(float(v) - avg) ** 2 for v in values]
+ variance = sum(sqdiffs) / (len(values) - 1)
+ sd = math.sqrt(variance)
+ except ValueError:
+ value = ", ".join(values)
+ else:
+ value = values[0]
+
+ trace_name = _EscapePerfResult(trace)
+ output = '%s%s: %s%s%s %s' % (
+ RESULT_TYPES[result_type],
+ _EscapePerfResult(measurement),
+ trace_name,
+      # Do not show the equal sign if the trace is empty. This usually happens
+      # when the measurement alone is clear enough to describe the result.
+ '= ' if trace_name else '',
+ value,
+ units)
+  if avg is not None:
+    output += '\nAvg %s: %f%s' % (measurement, avg, units)
+  if sd is not None:
+    output += '\nSd %s: %f%s' % (measurement, sd, units)
+ if print_to_stdout:
+ print output
+ return output
+
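+# Example (illustrative): with the default result type,
+#
+#   PrintPerfResult('vm_peak', 'reference', [1024, 2048], 'bytes')
+#
+# prints (and returns):
+#
+#   *RESULT vm_peak: reference= [1024,2048] bytes
+#   Avg vm_peak: 1536.000000bytes
+#   Sd vm_peak: 724.077344bytes
+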
+
+class PerfTestSetup(object):
+ """Provides methods for setting up a device for perf testing."""
+ _DROP_CACHES = '/proc/sys/vm/drop_caches'
+ _SCALING_GOVERNOR = '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor'
+
+ def __init__(self, adb):
+ self._adb = adb
+    online_cpus = self._adb.GetFileContents('/sys/devices/system/cpu/online',
+                                            log_result=False)
+    assert online_cpus, 'Unable to find /sys/devices/system/cpu/online'
+    # The file holds a range such as '0-3', so the CPU count is the last
+    # index plus one.
+    self._num_cpus = int(online_cpus[0].split('-')[-1]) + 1
+ self._original_scaling_governor = None
+
+ def DropRamCaches(self):
+ """Drops the filesystem ram caches for performance testing."""
+ if not self._adb.IsRootEnabled():
+ self._adb.EnableAdbRoot()
+ self._adb.RunShellCommand('sync')
+ self._adb.RunShellCommand('echo 3 > ' + PerfTestSetup._DROP_CACHES)
+
+ def SetUp(self):
+ """Sets up performance tests."""
+ if not self._original_scaling_governor:
+ self._original_scaling_governor = self._adb.GetFileContents(
+ PerfTestSetup._SCALING_GOVERNOR % 0,
+ log_result=False)[0]
+ self._SetScalingGovernorInternal('performance')
+ self.DropRamCaches()
+
+ def TearDown(self):
+ """Tears down performance tests."""
+ if self._original_scaling_governor:
+ self._SetScalingGovernorInternal(self._original_scaling_governor)
+ self._original_scaling_governor = None
+
+ def _SetScalingGovernorInternal(self, value):
+ for cpu in range(self._num_cpus):
+ self._adb.RunShellCommand(
+ ('echo %s > ' + PerfTestSetup._SCALING_GOVERNOR) % (value, cpu))
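+
+
+# A minimal usage sketch (illustrative; assumes |adb| is a connected
+# android_commands.AndroidCommands instance):
+#
+#   setup = PerfTestSetup(adb)
+#   setup.SetUp()       # pin the scaling governor and drop ram caches
+#   try:
+#     RunPerfTest()     # hypothetical test body
+#   finally:
+#     setup.TearDown()  # restore the original governor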
diff --git a/media/webrtc/trunk/build/android/pylib/ports.py b/media/webrtc/trunk/build/android/pylib/ports.py
new file mode 100644
index 000000000..e9b6b901e
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/ports.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions that deals with local and device ports."""
+
+import contextlib
+import fcntl
+import httplib
+import logging
+import os
+import re
+import socket
+import traceback
+
+import cmd_helper
+import constants
+
+
+# The following two methods are used to allocate ports for the various types
+# of test servers. Because some network-related tests can run on multiple
+# shards at the same time, port allocation must be process-safe. Here we
+# implement safe port allocation by leveraging flock.
+def ResetTestServerPortAllocation():
+ """Reset the port allocation to start from TEST_SERVER_PORT_FIRST.
+
+ Returns:
+    Returns True if the reset succeeds. Otherwise returns False.
+ """
+ try:
+ with open(constants.TEST_SERVER_PORT_FILE, 'w') as fp:
+ fp.write('%d' % constants.TEST_SERVER_PORT_FIRST)
+ if os.path.exists(constants.TEST_SERVER_PORT_LOCKFILE):
+ os.unlink(constants.TEST_SERVER_PORT_LOCKFILE)
+ return True
+ except Exception as e:
+ logging.error(e)
+ return False
+
+
+def AllocateTestServerPort():
+ """Allocate a port incrementally.
+
+ Returns:
+    Returns a valid port between TEST_SERVER_PORT_FIRST and
+    TEST_SERVER_PORT_LAST. Returning 0 means no valid port could be allocated.
+ """
+ port = 0
+  ports_tried = []
+  # Initialized before the try block so the finally clause can't raise a
+  # NameError if open() fails.
+  fp_lock = None
+ try:
+ fp_lock = open(constants.TEST_SERVER_PORT_LOCKFILE, 'w')
+ fcntl.flock(fp_lock, fcntl.LOCK_EX)
+ # Get current valid port and calculate next valid port.
+ assert os.path.exists(constants.TEST_SERVER_PORT_FILE)
+ with open(constants.TEST_SERVER_PORT_FILE, 'r+') as fp:
+ port = int(fp.read())
+ ports_tried.append(port)
+ while IsHostPortUsed(port):
+ port += 1
+ ports_tried.append(port)
+ if (port > constants.TEST_SERVER_PORT_LAST or
+ port < constants.TEST_SERVER_PORT_FIRST):
+ port = 0
+ else:
+ fp.seek(0, os.SEEK_SET)
+ fp.write('%d' % (port + 1))
+ except Exception as e:
+ logging.info(e)
+ finally:
+ if fp_lock:
+ fcntl.flock(fp_lock, fcntl.LOCK_UN)
+ fp_lock.close()
+ if port:
+    logging.info('Allocated port %d for test server.', port)
+ else:
+ logging.error('Could not allocate port for test server. '
+ 'List of ports tried: %s', str(ports_tried))
+ return port
+
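+# Typical usage (illustrative): reset the allocation once per test run, then
+# let each spawned test server grab the next free port:
+#
+#   assert ResetTestServerPortAllocation()
+#   port = AllocateTestServerPort()
+#   if not port:
+#     raise Exception('Out of test server ports.')
+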
+
+def IsHostPortUsed(host_port):
+ """Checks whether the specified host port is used or not.
+
+ Uses -n -P to inhibit the conversion of host/port numbers to host/port names.
+
+ Args:
+ host_port: Port on host we want to check.
+
+ Returns:
+ True if the port on host is already used, otherwise returns False.
+ """
+  # Group the host alternatives so the port applies to both of them.
+  port_info = '((127\.0\.0\.1)|(localhost))\:%d' % host_port
+ # TODO(jnd): Find a better way to filter the port.
+ re_port = re.compile(port_info, re.MULTILINE)
+ if re_port.findall(cmd_helper.GetCmdOutput(['lsof', '-nPi:%d' % host_port])):
+ return True
+ return False
+
+
+def IsDevicePortUsed(adb, device_port, state=''):
+ """Checks whether the specified device port is used or not.
+
+ Args:
+ adb: Instance of AndroidCommands for talking to the device.
+ device_port: Port on device we want to check.
+ state: String of the specified state. Default is empty string, which
+ means any state.
+
+ Returns:
+ True if the port on device is already used, otherwise returns False.
+ """
+ base_url = '127.0.0.1:%d' % device_port
+ netstat_results = adb.RunShellCommand('netstat', log_result=False)
+ for single_connect in netstat_results:
+    # Column 3 is the local address, which is the one we want to check.
+ connect_results = single_connect.split()
+ is_state_match = connect_results[5] == state if state else True
+ if connect_results[3] == base_url and is_state_match:
+ return True
+ return False
+
+
+def IsHttpServerConnectable(host, port, tries=3, command='GET', path='/',
+ expected_read='', timeout=2):
+ """Checks whether the specified http server is ready to serve request or not.
+
+ Args:
+ host: Host name of the HTTP server.
+ port: Port number of the HTTP server.
+ tries: How many times we want to test the connection. The default value is
+ 3.
+ command: The http command we use to connect to HTTP server. The default
+ command is 'GET'.
+ path: The path we use when connecting to HTTP server. The default path is
+ '/'.
+ expected_read: The content we expect to read from the response. The default
+ value is ''.
+ timeout: Timeout (in seconds) for each http connection. The default is 2s.
+
+ Returns:
+    Tuple of (connect status, client error). Connect status is a boolean
+    indicating whether the server is connectable. client_error is the error
+    message from the last attempt when connect status is False.
+ """
+ assert tries >= 1
+ for i in xrange(0, tries):
+ client_error = None
+ try:
+ with contextlib.closing(httplib.HTTPConnection(
+ host, port, timeout=timeout)) as http:
+        # Output debug information from the third try onwards.
+ http.set_debuglevel(i >= 2)
+ http.request(command, path)
+ r = http.getresponse()
+ content = r.read()
+ if r.status == 200 and r.reason == 'OK' and content == expected_read:
+ return (True, '')
+ client_error = ('Bad response: %s %s version %s\n ' %
+ (r.status, r.reason, r.version) +
+ '\n '.join([': '.join(h) for h in r.getheaders()]))
+ except (httplib.HTTPException, socket.error) as e:
+ # Probably too quick connecting: try again.
+ exception_error_msgs = traceback.format_exception_only(type(e), e)
+ if exception_error_msgs:
+ client_error = ''.join(exception_error_msgs)
+ # Only returns last client_error.
+ return (False, client_error or 'Timeout')
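+
+
+# Example (illustrative): poll a freshly spawned server before using it:
+#
+#   ok, error = IsHttpServerConnectable('127.0.0.1', port, tries=5)
+#   if not ok:
+#     logging.error('Server not ready: %s', error)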
diff --git a/media/webrtc/trunk/build/android/pylib/python_test_base.py b/media/webrtc/trunk/build/android/pylib/python_test_base.py
new file mode 100644
index 000000000..3517cdda8
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/python_test_base.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Base class for Android Python-driven tests.
+
+This test case is intended to serve as the base class for any Python-driven
+tests. It is similar to the Python unittest module in that the user's tests
+inherit from this class and add their test methods to it.
+
+When a PythonTestBase object is instantiated, its purpose is to run only one of
+its tests. The test runner gives it the name of the test the instance will
+run. The test runner calls SetUp with the Android device ID which the test will
+run against. The runner runs the test method itself, collecting the result,
+and calls TearDown.
+
+Test methods can do whatever they want, such as calling Java tests via
+_RunJavaTests. Those helpers have the advantage of massaging the Java test
+results into Python test results.
+"""
+
+import logging
+import os
+import time
+
+import android_commands
+import apk_info
+from run_java_tests import TestRunner
+from test_result import SingleTestResult, TestResults
+
+
+# aka the parent of com.google.android
+BASE_ROOT = 'src' + os.sep
+
+
+class PythonTestBase(object):
+ """Base class for Python-driven tests."""
+
+ def __init__(self, test_name):
+ # test_name must match one of the test methods defined on a subclass which
+ # inherits from this class.
+ # It's stored so we can do the attr lookup on demand, allowing this class
+ # to be pickled, a requirement for the multiprocessing module.
+ self.test_name = test_name
+ class_name = self.__class__.__name__
+ self.qualified_name = class_name + '.' + self.test_name
+
+ def SetUp(self, options):
+ self.options = options
+ self.shard_index = self.options.shard_index
+ self.device_id = self.options.device_id
+ self.adb = android_commands.AndroidCommands(self.device_id)
+ self.ports_to_forward = []
+
+ def TearDown(self):
+ pass
+
+ def Run(self):
+ logging.warning('Running Python-driven test: %s', self.test_name)
+ return getattr(self, self.test_name)()
+
+ def _RunJavaTest(self, fname, suite, test):
+ """Runs a single Java test with a Java TestRunner.
+
+ Args:
+ fname: filename for the test (e.g. foo/bar/baz/tests/FooTest.py)
+ suite: name of the Java test suite (e.g. FooTest)
+ test: name of the test method to run (e.g. testFooBar)
+
+ Returns:
+ TestResults object with a single test result.
+ """
+ test = self._ComposeFullTestName(fname, suite, test)
+ apks = [apk_info.ApkInfo(self.options.test_apk_path,
+ self.options.test_apk_jar_path)]
+ java_test_runner = TestRunner(self.options, self.device_id, [test], False,
+ self.shard_index,
+ apks,
+ self.ports_to_forward)
+ return java_test_runner.Run()
+
+ def _RunJavaTests(self, fname, tests):
+ """Calls a list of tests and stops at the first test failure.
+
+ This method iterates until either it encounters a non-passing test or it
+ exhausts the list of tests. Then it returns the appropriate Python result.
+
+ Args:
+ fname: filename for the Python test
+ tests: a list of Java test names which will be run
+
+ Returns:
+ A TestResults object containing a result for this Python test.
+ """
+    start_ms = int(time.time() * 1000)
+
+ result = None
+ for test in tests:
+ # We're only running one test at a time, so this TestResults object will
+ # hold only one result.
+ suite, test_name = test.split('.')
+ result = self._RunJavaTest(fname, suite, test_name)
+ # A non-empty list means the test did not pass.
+ if result.GetAllBroken():
+ break
+
+    duration_ms = int(time.time() * 1000) - start_ms
+
+ # Do something with result.
+ return self._ProcessResults(result, start_ms, duration_ms)
+
+ def _ProcessResults(self, result, start_ms, duration_ms):
+ """Translates a Java test result into a Python result for this test.
+
+ The TestRunner class that we use under the covers will return a test result
+ for that specific Java test. However, to make reporting clearer, we have
+ this method to abstract that detail and instead report that as a failure of
+ this particular test case while still including the Java stack trace.
+
+ Args:
+ result: TestResults with a single Java test result
+ start_ms: the time the test started
+ duration_ms: the length of the test
+
+ Returns:
+ A TestResults object containing a result for this Python test.
+ """
+ test_results = TestResults()
+
+    # If our test is in the broken list, then it crashed or failed.
+ broken = result.GetAllBroken()
+ if broken:
+ # Since we have run only one test, take the first and only item.
+ single_result = broken[0]
+
+ log = single_result.log
+ if not log:
+ log = 'No logging information.'
+
+ python_result = SingleTestResult(self.qualified_name, start_ms,
+ duration_ms,
+ log)
+
+ # Figure out where the test belonged. There's probably a cleaner way of
+ # doing this.
+ if single_result in result.crashed:
+ test_results.crashed = [python_result]
+ elif single_result in result.failed:
+ test_results.failed = [python_result]
+ elif single_result in result.unknown:
+ test_results.unknown = [python_result]
+
+ else:
+ python_result = SingleTestResult(self.qualified_name, start_ms,
+ duration_ms)
+ test_results.ok = [python_result]
+
+ return test_results
+
+ def _ComposeFullTestName(self, fname, suite, test):
+ package_name = self._GetPackageName(fname)
+ return package_name + '.' + suite + '#' + test
+
+ def _GetPackageName(self, fname):
+ """Extracts the package name from the test file path."""
+ dirname = os.path.dirname(fname)
+ package = dirname[dirname.rfind(BASE_ROOT) + len(BASE_ROOT):]
+ return package.replace(os.sep, '.')
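+
+
+# A minimal subclass sketch (illustrative; the names are hypothetical). The
+# test runner instantiates the class once per test method and drives it
+# through SetUp()/Run()/TearDown():
+#
+#   class ExampleTest(PythonTestBase):
+#     def testFooBar(self):
+#       # Delegates to the Java FooTest#testFooBar and converts the result.
+#       return self._RunJavaTests(__file__, ['FooTest.testFooBar'])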
diff --git a/media/webrtc/trunk/build/android/pylib/python_test_caller.py b/media/webrtc/trunk/build/android/pylib/python_test_caller.py
new file mode 100644
index 000000000..882b89299
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/python_test_caller.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper module for calling python-based tests."""
+
+
+import logging
+import sys
+import time
+
+from test_result import TestResults
+
+
+def CallPythonTest(test, options):
+ """Invokes a test function and translates Python exceptions into test results.
+
+ This method invokes SetUp()/TearDown() on the test. It is intended to be
+ resilient to exceptions in SetUp(), the test itself, and TearDown(). Any
+ Python exception means the test is marked as failed, and the test result will
+ contain information about the exception.
+
+ If SetUp() raises an exception, the test is not run.
+
+ If TearDown() raises an exception, the test is treated as a failure. However,
+ if the test itself raised an exception beforehand, that stack trace will take
+ precedence whether or not TearDown() also raised an exception.
+
+  The options object carries a shard_index, which tests can use to bring up
+  servers with unique port numbers, for example. It is not meaningful in
+  single-device scenarios, where test execution is serial rather than
+  parallel. See also python_test_sharder.
+
+ Args:
+ test: an object which is ostensibly a subclass of PythonTestBase.
+ options: Options to use for setting up tests.
+
+ Returns:
+ A TestResults object which contains any results produced by the test or, in
+ the case of a Python exception, the Python exception info.
+ """
+
+  start_date_ms = int(time.time() * 1000)
+ failed = False
+
+ try:
+ test.SetUp(options)
+ except Exception:
+ failed = True
+ logging.exception(
+ 'Caught exception while trying to run SetUp() for test: ' +
+ test.qualified_name)
+ # Tests whose SetUp() method has failed are likely to fail, or at least
+ # yield invalid results.
+ exc_info = sys.exc_info()
+ return TestResults.FromPythonException(test.qualified_name, start_date_ms,
+ exc_info)
+
+ try:
+ result = test.Run()
+ except Exception:
+ # Setting this lets TearDown() avoid stomping on our stack trace from Run()
+ # should TearDown() also raise an exception.
+ failed = True
+ logging.exception('Caught exception while trying to run test: ' +
+ test.qualified_name)
+ exc_info = sys.exc_info()
+ result = TestResults.FromPythonException(test.qualified_name, start_date_ms,
+ exc_info)
+
+ try:
+ test.TearDown()
+ except Exception:
+ logging.exception(
+        'Caught exception while trying to run TearDown() for test: ' +
+ test.qualified_name)
+ if not failed:
+ # Don't stomp the error during the test if TearDown blows up. This is a
+ # trade-off: if the test fails, this will mask any problem with TearDown
+ # until the test is fixed.
+ exc_info = sys.exc_info()
+ result = TestResults.FromPythonException(test.qualified_name,
+ start_date_ms, exc_info)
+
+ return result
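+
+
+# Typical usage (illustrative): the sharder invokes this once per test, e.g.
+#
+#   results = CallPythonTest(ExampleTest('testFooBar'), options)
+#
+# where ExampleTest is a hypothetical PythonTestBase subclass.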
diff --git a/media/webrtc/trunk/build/android/pylib/python_test_sharder.py b/media/webrtc/trunk/build/android/pylib/python_test_sharder.py
new file mode 100644
index 000000000..e27096d78
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/python_test_sharder.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Takes care of sharding the python-drive tests in multiple devices."""
+
+import copy
+import logging
+import multiprocessing
+
+from python_test_caller import CallPythonTest
+from run_java_tests import FatalTestException
+import sharded_tests_queue
+from test_result import TestResults
+
+
+def SetTestsContainer(tests_container):
+ """Sets PythonTestSharder as a top-level field.
+
+ PythonTestSharder uses multiprocessing.Pool, which creates a pool of
+ processes. This is used to initialize each worker in the pool, ensuring that
+ each worker has access to this shared pool of tests.
+
+ The multiprocessing module requires that this be a top-level method.
+
+ Args:
+ tests_container: the container for all the tests.
+ """
+ PythonTestSharder.tests_container = tests_container
+
+
+def _DefaultRunnable(test_runner):
+ """A default runnable for a PythonTestRunner.
+
+ Args:
+ test_runner: A PythonTestRunner which will run tests.
+
+ Returns:
+ The test results.
+ """
+ return test_runner.RunTests()
+
+
+class PythonTestRunner(object):
+ """Thin wrapper around a list of PythonTestBase instances.
+
+ This is meant to be a long-lived object which can run multiple Python tests
+ within its lifetime. Tests will receive the device_id and shard_index.
+
+ The shard index affords the ability to create unique port numbers (e.g.
+ DEFAULT_PORT + shard_index) if the test so wishes.
+ """
+
+ def __init__(self, options):
+ """Constructor.
+
+ Args:
+ options: Options to use for setting up tests.
+ """
+ self.options = options
+
+ def RunTests(self):
+ """Runs tests from the shared pool of tests, aggregating results.
+
+ Returns:
+ A list of test results for all of the tests which this runner executed.
+ """
+ tests = PythonTestSharder.tests_container
+
+ results = []
+ for t in tests:
+ res = CallPythonTest(t, self.options)
+ results.append(res)
+
+ return TestResults.FromTestResults(results)
+
+
+class PythonTestSharder(object):
+ """Runs Python tests in parallel on multiple devices.
+
+ This is lifted more or less wholesale from BaseTestRunner.
+
+ Under the covers, it creates a pool of long-lived PythonTestRunners, which
+ execute tests from the pool of tests.
+
+ Args:
+ attached_devices: a list of device IDs attached to the host.
+ available_tests: a list of tests to run which subclass PythonTestBase.
+ options: Options to use for setting up tests.
+
+ Returns:
+ An aggregated list of test results.
+ """
+ tests_container = None
+
+ def __init__(self, attached_devices, available_tests, options):
+ self.options = options
+ self.attached_devices = attached_devices
+ self.retries = options.shard_retries
+ self.tests = available_tests
+
+ def _SetupSharding(self, tests):
+ """Creates the shared pool of tests and makes it available to test runners.
+
+ Args:
+ tests: the list of tests which will be consumed by workers.
+ """
+ SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
+ len(self.attached_devices), tests))
+
+ def RunShardedTests(self):
+ """Runs tests in parallel using a pool of workers.
+
+ Returns:
+ A list of test results aggregated from all test runs.
+ """
+ logging.warning('*' * 80)
+    logging.warning('Sharding across %d devices.', len(self.attached_devices))
+    logging.warning('Note that the output is not synchronized.')
+    logging.warning('Look for the "Final result" banner at the end.')
+ logging.warning('*' * 80)
+ all_passed = []
+ test_results = TestResults()
+ tests_to_run = self.tests
+ for retry in xrange(self.retries):
+ logging.warning('Try %d of %d', retry + 1, self.retries)
+ self._SetupSharding(self.tests)
+ test_runners = self._MakeTestRunners(self.attached_devices)
+ logging.warning('Starting...')
+ pool = multiprocessing.Pool(len(self.attached_devices),
+ SetTestsContainer,
+ [PythonTestSharder.tests_container])
+
+ # List of TestResults objects from each test execution.
+ try:
+ results_lists = pool.map(_DefaultRunnable, test_runners)
+ except Exception:
+ logging.exception('Unable to run tests. Something with the '
+ 'PythonTestRunners has gone wrong.')
+ raise FatalTestException('PythonTestRunners were unable to run tests.')
+
+ test_results = TestResults.FromTestResults(results_lists)
+ # Accumulate passing results.
+ all_passed += test_results.ok
+ # If we have failed tests, map them to tests to retry.
+ failed_tests = test_results.GetAllBroken()
+ tests_to_run = self._GetTestsToRetry(self.tests,
+ failed_tests)
+
+ # Bail out early if we have no more tests. This can happen if all tests
+ # pass before we're out of retries, for example.
+ if not tests_to_run:
+ break
+
+    # test_results holds the results from the most recent run, which could
+    # include a variety of failure modes (unknown, crashed, failed, etc.);
+    # all_passed has accumulated the passing results across every retry.
+    final_results = test_results
+    final_results.ok = all_passed
+
+ return final_results
+
+ def _MakeTestRunners(self, attached_devices):
+ """Initialize and return a list of PythonTestRunners.
+
+ Args:
+ attached_devices: list of device IDs attached to host.
+
+ Returns:
+ A list of PythonTestRunners, one for each device.
+ """
+ test_runners = []
+ for index, device in enumerate(attached_devices):
+ logging.warning('*' * 80)
+ logging.warning('Creating shard %d for %s', index, device)
+ logging.warning('*' * 80)
+ # Bind the PythonTestRunner to a device & shard index. Give it the
+ # runnable which it will use to actually execute the tests.
+ test_options = copy.deepcopy(self.options)
+ test_options.ensure_value('device_id', device)
+ test_options.ensure_value('shard_index', index)
+ test_runner = PythonTestRunner(test_options)
+ test_runners.append(test_runner)
+
+ return test_runners
+
+ def _GetTestsToRetry(self, available_tests, failed_tests):
+ """Infers a list of tests to retry from failed tests and available tests.
+
+ Args:
+ available_tests: a list of tests which subclass PythonTestBase.
+ failed_tests: a list of SingleTestResults representing failed tests.
+
+ Returns:
+ A list of test objects which correspond to test names found in
+ failed_tests, or an empty list if there is no correspondence.
+ """
+ failed_test_names = map(lambda t: t.test_name, failed_tests)
+ tests_to_retry = [t for t in available_tests
+ if t.qualified_name in failed_test_names]
+ return tests_to_retry
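+
+
+# Typical usage (illustrative; mirrors run_python_tests.DispatchPythonTests):
+#
+#   sharder = PythonTestSharder(attached_devices, available_tests, options)
+#   results = sharder.RunShardedTests()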
diff --git a/media/webrtc/trunk/build/android/pylib/run_java_tests.py b/media/webrtc/trunk/build/android/pylib/run_java_tests.py
new file mode 100644
index 000000000..fc0a13fd8
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/run_java_tests.py
@@ -0,0 +1,591 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs the Java tests. See more information on run_instrumentation_tests.py."""
+
+import fnmatch
+import logging
+import os
+import re
+import shutil
+import sys
+import time
+
+import android_commands
+import apk_info
+from base_test_runner import BaseTestRunner
+from base_test_sharder import BaseTestSharder, SetTestsContainer
+import cmd_helper
+import constants
+import errors
+from forwarder import Forwarder
+from json_perf_parser import GetAverageRunInfoFromJSONString
+from perf_tests_helper import PrintPerfResult
+import sharded_tests_queue
+from test_result import SingleTestResult, TestResults
+import valgrind_tools
+
+_PERF_TEST_ANNOTATION = 'PerfTest'
+
+
+class FatalTestException(Exception):
+ """A fatal test exception."""
+ pass
+
+
+def _TestNameToExpectation(test_name):
+ # A test name is a Package.Path.Class#testName; convert to what we use in
+ # the expectation file.
+ return '.'.join(test_name.replace('#', '.').split('.')[-2:])
+
+
+def FilterTests(test_names, pattern_list, inclusive):
+ """Filters |test_names| using a list of patterns.
+
+ Args:
+ test_names: A list of test names.
+ pattern_list: A list of patterns.
+    inclusive: If True, returns the tests that match any pattern. If False,
+ returns the tests that do not match any pattern.
+ Returns:
+ A list of test names.
+ """
+ ret = []
+ for t in test_names:
+ has_match = False
+ for pattern in pattern_list:
+ has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
+ pattern)
+ if has_match == inclusive:
+ ret += [t]
+ return ret
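+
+
+# Example (illustrative): patterns match against the 'Class.testName' form
+# produced by _TestNameToExpectation:
+#
+#   tests = ['org.chromium.FooTest#testA', 'org.chromium.BarTest#testB']
+#   FilterTests(tests, ['FooTest.*'], True)   -> [the FooTest entry]
+#   FilterTests(tests, ['FooTest.*'], False)  -> [the BarTest entry]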
+
+
+class TestRunner(BaseTestRunner):
+ """Responsible for running a series of tests connected to a single device."""
+
+ _DEVICE_DATA_DIR = 'chrome/test/data'
+ _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
+ 'external/emma/lib/emma.jar')
+ _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
+ _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
+ _COVERAGE_FILENAME = 'coverage.ec'
+ _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
+ _COVERAGE_FILENAME)
+ _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
+ ''),
+ 'out/target/common/obj/APPS',
+ 'Chrome_intermediates/coverage.em')
+ _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
+ _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
+ '/chrome-profile*')
+ _DEVICE_HAS_TEST_FILES = {}
+
+ def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
+ ports_to_forward):
+ """Create a new TestRunner.
+
+ Args:
+ options: An options object with the following required attributes:
+ - build_type: 'Release' or 'Debug'.
+        - install_apk: Whether to re-install the APKs before running.
+        - save_perf_json: Whether or not to save the JSON file from UI perf
+          tests.
+        - screenshot_failures: Whether to take a screenshot on test failure.
+        - tool: Name of the Valgrind tool.
+        - wait_for_debugger: Blocks until the debugger is connected.
+ device: Attached android device.
+ tests_iter: A list of tests to be run.
+      coverage: Whether to collect coverage information.
+ shard_index: shard # for this TestRunner, used to create unique port
+ numbers.
+      apks: A list of ApkInfo objects to be installed. The first element
+        should be the test APK; the rest are additional APKs used by the
+        test. Defaults to ChromeTest.apk.
+ ports_to_forward: A list of port numbers for which to set up forwarders.
+ Can be optionally requested by a test case.
+ Raises:
+ FatalTestException: if coverage metadata is not available.
+ """
+ BaseTestRunner.__init__(
+ self, device, options.tool, shard_index, options.build_type)
+
+ if not apks:
+ apks = [apk_info.ApkInfo(options.test_apk_path,
+ options.test_apk_jar_path)]
+
+ self.build_type = options.build_type
+ self.install_apk = options.install_apk
+ self.save_perf_json = options.save_perf_json
+ self.screenshot_failures = options.screenshot_failures
+ self.wait_for_debugger = options.wait_for_debugger
+
+ self.tests_iter = tests_iter
+ self.coverage = coverage
+ self.apks = apks
+ self.test_apk = apks[0]
+ self.instrumentation_class_path = self.test_apk.GetPackageName()
+ self.ports_to_forward = ports_to_forward
+
+ self.test_results = TestResults()
+ self.forwarder = None
+
+ if self.coverage:
+ if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
+ os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
+ if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
+ raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
+ ' : Coverage meta info [' +
+ TestRunner._COVERAGE_META_INFO_PATH +
+ '] does not exist.')
+ if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
+ not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
+ raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
+ ' : Path specified in $EMMA_WEB_ROOTDIR [' +
+ TestRunner._COVERAGE_WEB_ROOT_DIR +
+ '] does not exist.')
+
+ def _GetTestsIter(self):
+ if not self.tests_iter:
+      # A multiprocessing.Queue can't be pickled across processes, so it can't
+      # be set as a member in the constructor. Grab it lazily here instead.
+      self.tests_iter = BaseTestSharder.tests_container
+ assert self.tests_iter
+ return self.tests_iter
+
+ def CopyTestFilesOnce(self):
+ """Pushes the test data files to the device. Installs the apk if opted."""
+ if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
+ logging.warning('Already copied test files to device %s, skipping.',
+ self.device)
+ return
+ host_test_files = [
+ ('android_webview/test/data/device_files', 'webview'),
+ ('content/test/data/android/device_files', 'content'),
+ ('chrome/test/data/android/device_files', 'chrome')
+ ]
+ for (host_src, dst_layer) in host_test_files:
+ host_test_files_path = constants.CHROME_DIR + '/' + host_src
+ if os.path.exists(host_test_files_path):
+ self.adb.PushIfNeeded(host_test_files_path,
+ self.adb.GetExternalStorage() + '/' +
+ TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
+ if self.install_apk:
+ for apk in self.apks:
+ self.adb.ManagedInstall(apk.GetApkPath(),
+ package_name=apk.GetPackageName())
+ self.tool.CopyFiles()
+ TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
+
+ def SaveCoverageData(self, test):
+ """Saves the Emma coverage data before it's overwritten by the next test.
+
+ Args:
+ test: the test whose coverage data is collected.
+ """
+ if not self.coverage:
+ return
+ if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
+ constants.CHROME_DIR):
+ logging.error('ERROR: Unable to find file ' +
+ TestRunner._COVERAGE_RESULT_PATH +
+ ' on the device for test ' + test)
+ pulled_coverage_file = os.path.join(constants.CHROME_DIR,
+ TestRunner._COVERAGE_FILENAME)
+ if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
+ cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
+ '-in', pulled_coverage_file,
+ '-in', TestRunner._COVERAGE_MERGED_FILENAME,
+ '-out', TestRunner._COVERAGE_MERGED_FILENAME]
+ cmd_helper.RunCmd(cmd)
+ else:
+ shutil.copy(pulled_coverage_file,
+ TestRunner._COVERAGE_MERGED_FILENAME)
+ os.remove(pulled_coverage_file)
+
+ def GenerateCoverageReportIfNeeded(self):
+ """Uses the Emma to generate a coverage report and a html page."""
+ if not self.coverage:
+ return
+ cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
+ 'emma', 'report', '-r', 'html',
+ '-in', TestRunner._COVERAGE_MERGED_FILENAME,
+ '-in', TestRunner._COVERAGE_META_INFO_PATH]
+ cmd_helper.RunCmd(cmd)
+ new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
+ time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
+ shutil.copytree('coverage', new_dir)
+
+ latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
+ 'Latest_Coverage_Run')
+ if os.path.exists(latest_dir):
+ shutil.rmtree(latest_dir)
+ os.mkdir(latest_dir)
+ webserver_new_index = os.path.join(new_dir, 'index.html')
+ webserver_new_files = os.path.join(new_dir, '_files')
+ webserver_latest_index = os.path.join(latest_dir, 'index.html')
+ webserver_latest_files = os.path.join(latest_dir, '_files')
+    # Set up new symlinks to the latest result.
+ os.symlink(webserver_new_index, webserver_latest_index)
+ os.symlink(webserver_new_files, webserver_latest_files)
+ cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])
+
+ def _GetInstrumentationArgs(self):
+ ret = {}
+ if self.coverage:
+ ret['coverage'] = 'true'
+ if self.wait_for_debugger:
+ ret['debug'] = 'true'
+ return ret
+
+ def _TakeScreenshot(self, test):
+ """Takes a screenshot from the device."""
+ screenshot_tool = os.path.join(constants.CHROME_DIR,
+ 'third_party/android_tools/sdk/tools/monkeyrunner')
+ screenshot_script = os.path.join(constants.CHROME_DIR,
+ 'build/android/monkeyrunner_screenshot.py')
+ screenshot_path = os.path.join(constants.CHROME_DIR,
+ 'out_screenshots')
+ if not os.path.exists(screenshot_path):
+ os.mkdir(screenshot_path)
+ screenshot_name = os.path.join(screenshot_path, test + '.png')
+ logging.info('Taking screenshot named %s', screenshot_name)
+ cmd_helper.RunCmd([screenshot_tool, screenshot_script,
+ '--serial', self.device,
+ '--file', screenshot_name])
+
+ def SetUp(self):
+ """Sets up the test harness and device before all tests are run."""
+ super(TestRunner, self).SetUp()
+ if not self.adb.IsRootEnabled():
+      logging.warning('Unable to enable java asserts for %s, non-rooted device',
+ self.device)
+ else:
+ if self.adb.SetJavaAssertsEnabled(enable=True):
+ self.adb.Reboot(full_reboot=False)
+
+    # Give the HTTP server a different default port per shard; otherwise
+    # multiple processes may race to launch lighttpd on the same port at the
+    # same time.
+ http_server_ports = self.LaunchTestHttpServer(
+ os.path.join(constants.CHROME_DIR),
+ (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
+ if self.ports_to_forward:
+ port_pairs = [(port, port) for port in self.ports_to_forward]
+ # We need to remember which ports the HTTP server is using, since the
+ # forwarder will stomp on them otherwise.
+ port_pairs.append(http_server_ports)
+ self.forwarder = Forwarder(
+ self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
+ self.CopyTestFilesOnce()
+ self.flags.AddFlags(['--enable-test-intents'])
+
+ def TearDown(self):
+ """Cleans up the test harness and saves outstanding data from test run."""
+ if self.forwarder:
+ self.forwarder.Close()
+ self.GenerateCoverageReportIfNeeded()
+ super(TestRunner, self).TearDown()
+
+ def TestSetup(self, test):
+ """Sets up the test harness for running a particular test.
+
+ Args:
+ test: The name of the test that will be run.
+ """
+ self.SetupPerfMonitoringIfNeeded(test)
+ self._SetupIndividualTestTimeoutScale(test)
+ self.tool.SetupEnvironment()
+
+ # Make sure the forwarder is still running.
+ self.RestartHttpServerForwarderIfNecessary()
+
+ def _IsPerfTest(self, test):
+ """Determines whether a test is a performance test.
+
+ Args:
+ test: The name of the test to be checked.
+
+ Returns:
+ Whether the test is annotated as a performance test.
+ """
+ return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)
+
+ def SetupPerfMonitoringIfNeeded(self, test):
+ """Sets up performance monitoring if the specified test requires it.
+
+ Args:
+ test: The name of the test to be run.
+ """
+ if not self._IsPerfTest(test):
+ return
+ self.adb.Adb().SendCommand('shell rm ' +
+ TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
+ self.adb.StartMonitoringLogcat()
+
+ def TestTeardown(self, test, test_result):
+ """Cleans up the test harness after running a particular test.
+
+ Depending on the options of this TestRunner this might handle coverage
+ tracking or performance tracking. This method will only be called if the
+ test passed.
+
+ Args:
+ test: The name of the test that was just run.
+ test_result: result for this test.
+ """
+
+ self.tool.CleanUpEnvironment()
+
+ # The logic below relies on the test passing.
+ if not test_result or test_result.GetStatusCode():
+ return
+
+ self.TearDownPerfMonitoring(test)
+ self.SaveCoverageData(test)
+
+ def TearDownPerfMonitoring(self, test):
+ """Cleans up performance monitoring if the specified test required it.
+
+ Args:
+ test: The name of the test that was just run.
+ Raises:
+ FatalTestException: if there's anything wrong with the perf data.
+ """
+ if not self._IsPerfTest(test):
+ return
+ raw_test_name = test.split('#')[1]
+
+    # Wait and grab annotation data so we can figure out which traces to parse.
+ regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' +
+ raw_test_name +
+ '\)\:(.*)'), None)
+
+    # If the test is set to run on a specific device type only (i.e. only
+    # tablet or only phone) and it is being run on the wrong device, the test
+    # just quits and does not do anything. The Java test harness will still
+    # print the appropriate annotation for us, but adds --NORUN-- so we know
+    # to ignore the results.
+    # The --NORUN-- tag is managed by MainActivityTestBase.java.
+ if regex.group(1) != '--NORUN--':
+
+ # Obtain the relevant perf data. The data is dumped to a
+ # JSON formatted file.
+ json_string = self.adb.GetFileContents(
+ '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')
+
+ if json_string:
+ json_string = '\n'.join(json_string)
+ else:
+ raise FatalTestException('Perf file does not exist or is empty')
+
+ if self.save_perf_json:
+ json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
+ with open(json_local_file, 'w') as f:
+ f.write(json_string)
+ logging.info('Saving Perf UI JSON from test ' +
+ test + ' to ' + json_local_file)
+
+ raw_perf_data = regex.group(1).split(';')
+
+ for raw_perf_set in raw_perf_data:
+ if raw_perf_set:
+ perf_set = raw_perf_set.split(',')
+ if len(perf_set) != 3:
+ raise FatalTestException('Unexpected number of tokens in '
+ 'perf annotation string: ' + raw_perf_set)
+
+ # Process the performance data
+ result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])
+
+ PrintPerfResult(perf_set[1], perf_set[2],
+ [result['average']], result['units'])
+
+ def _SetupIndividualTestTimeoutScale(self, test):
+ timeout_scale = self._GetIndividualTestTimeoutScale(test)
+ valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)
+
+ def _GetIndividualTestTimeoutScale(self, test):
+ """Returns the timeout scale for the given |test|."""
+ annotations = self.apks[0].GetTestAnnotations(test)
+ timeout_scale = 1
+ if 'TimeoutScale' in annotations:
+ for annotation in annotations:
+ scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
+ if scale_match:
+ timeout_scale = int(scale_match.group(1))
+ if self.wait_for_debugger:
+ timeout_scale *= 100
+ return timeout_scale
+
+ def _GetIndividualTestTimeoutSecs(self, test):
+ """Returns the timeout in seconds for the given |test|."""
+ annotations = self.apks[0].GetTestAnnotations(test)
+ if 'Manual' in annotations:
+ return 600 * 60
+ if 'External' in annotations:
+ return 10 * 60
+ if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
+ return 5 * 60
+ if 'MediumTest' in annotations:
+ return 3 * 60
+ return 1 * 60
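+
+  # Example (illustrative): a test annotated with 'MediumTest' and
+  # 'TimeoutScale:3' gets 3 * 60 seconds from the size annotation, scaled by
+  # 3 to 540 seconds, and by a further 100x when wait_for_debugger is set.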
+
+ def RunTests(self):
+ """Runs the tests, generating the coverage if needed.
+
+ Returns:
+ A TestResults object.
+ """
+ instrumentation_path = (self.instrumentation_class_path +
+ '/android.test.InstrumentationTestRunner')
+ instrumentation_args = self._GetInstrumentationArgs()
+ for test in self._GetTestsIter():
+ test_result = None
+ start_date_ms = None
+ try:
+ self.TestSetup(test)
+        start_date_ms = int(time.time() * 1000)
+ args_with_filter = dict(instrumentation_args)
+ args_with_filter['class'] = test
+ # |test_results| is a list that should contain
+ # a single TestResult object.
+        logging.warning(args_with_filter)
+ (test_results, _) = self.adb.Adb().StartInstrumentation(
+ instrumentation_path=instrumentation_path,
+ instrumentation_args=args_with_filter,
+ timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
+ self._GetIndividualTestTimeoutScale(test) *
+ self.tool.GetTimeoutScale()))
+        duration_ms = int(time.time() * 1000) - start_date_ms
+ assert len(test_results) == 1
+ test_result = test_results[0]
+ status_code = test_result.GetStatusCode()
+ if status_code:
+ log = test_result.GetFailureReason()
+ if not log:
+ log = 'No information.'
+ if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
+ self._TakeScreenshot(test)
+ self.test_results.failed += [SingleTestResult(test, start_date_ms,
+ duration_ms, log)]
+ else:
+ result = [SingleTestResult(test, start_date_ms, duration_ms)]
+ self.test_results.ok += result
+ # Catch exceptions thrown by StartInstrumentation().
+ # See ../../third_party/android/testrunner/adb_interface.py
+ except (errors.WaitForResponseTimedOutError,
+ errors.DeviceUnresponsiveError,
+ errors.InstrumentationError), e:
+        if start_date_ms:
+          duration_ms = int(time.time() * 1000) - start_date_ms
+        else:
+          start_date_ms = int(time.time() * 1000)
+          duration_ms = 0
+ message = str(e)
+ if not message:
+ message = 'No information.'
+ self.test_results.crashed += [SingleTestResult(test, start_date_ms,
+ duration_ms,
+ message)]
+ test_result = None
+ self.TestTeardown(test, test_result)
+ return self.test_results
+
+
+class TestSharder(BaseTestSharder):
+ """Responsible for sharding the tests on the connected devices."""
+
+ def __init__(self, attached_devices, options, tests, apks):
+ BaseTestSharder.__init__(self, attached_devices)
+ self.options = options
+ self.tests = tests
+ self.apks = apks
+
+ def SetupSharding(self, tests):
+ """Called before starting the shards."""
+ SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
+ len(self.attached_devices), tests))
+
+ def CreateShardedTestRunner(self, device, index):
+ """Creates a sharded test runner.
+
+ Args:
+ device: Device serial where this shard will run.
+ index: Index of this device in the pool.
+
+ Returns:
+ A TestRunner object.
+ """
+ return TestRunner(self.options, device, None, False, index, self.apks, [])
+
+
+def DispatchJavaTests(options, apks):
+ """Dispatches Java tests onto connected device(s).
+
+ If possible, this method will attempt to shard the tests to
+ all connected devices. Otherwise, dispatch and run tests on one device.
+
+ Args:
+ options: Command line options.
+ apks: list of APKs to use.
+
+ Returns:
+ A TestResults object holding the results of the Java tests.
+
+ Raises:
+    FatalTestException: when there are no attached devices.
+ """
+ test_apk = apks[0]
+ if options.annotation:
+ available_tests = test_apk.GetAnnotatedTests(options.annotation)
+ if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
+ tests_without_annotation = [
+ m for m in
+ test_apk.GetTestMethods()
+ if not test_apk.GetTestAnnotations(m) and
+ not apk_info.ApkInfo.IsPythonDrivenTest(m)]
+ if tests_without_annotation:
+ tests_without_annotation.sort()
+ logging.warning('The following tests do not contain any annotation. '
+ 'Assuming "SmallTest":\n%s',
+ '\n'.join(tests_without_annotation))
+ available_tests += tests_without_annotation
+ else:
+ available_tests = [m for m in test_apk.GetTestMethods()
+ if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
+ coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'
+
+ tests = []
+ if options.test_filter:
+ # |available_tests| are in adb instrument format: package.path.class#test.
+ filter_without_hash = options.test_filter.replace('#', '.')
+ tests = [t for t in available_tests
+ if filter_without_hash in t.replace('#', '.')]
+ else:
+ tests = available_tests
+
+ if not tests:
+ logging.warning('No Java tests to run with current args.')
+ return TestResults()
+
+ tests *= options.number_of_runs
+
+ attached_devices = android_commands.GetAttachedDevices()
+ test_results = TestResults()
+
+ if not attached_devices:
+ raise FatalTestException('You have no devices attached or visible!')
+ if options.device:
+ attached_devices = [options.device]
+
+ logging.info('Will run: %s', str(tests))
+
+ if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
+    logging.warning('Coverage / debugger cannot be sharded; '
+                    'using first available device.')
+ attached_devices = attached_devices[:1]
+ sharder = TestSharder(attached_devices, options, tests, apks)
+ test_results = sharder.RunShardedTests()
+ return test_results
diff --git a/media/webrtc/trunk/build/android/pylib/run_python_tests.py b/media/webrtc/trunk/build/android/pylib/run_python_tests.py
new file mode 100644
index 000000000..7d39f4829
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/run_python_tests.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Runs the Python tests (relies on using the Java test runner)."""
+
+import logging
+import os
+import sys
+import types
+
+import android_commands
+import apk_info
+import constants
+import python_test_base
+from python_test_caller import CallPythonTest
+from python_test_sharder import PythonTestSharder
+import run_java_tests
+from run_java_tests import FatalTestException
+from test_info_collection import TestInfoCollection
+from test_result import TestResults
+
+
+def _GetPythonFiles(root, files):
+ """Returns all files from |files| that end in 'Test.py'.
+
+ Args:
+ root: A directory name with python files.
+ files: A list of file names.
+
+ Returns:
+ A list with all Python driven test file paths.
+ """
+ return [os.path.join(root, f) for f in files if f.endswith('Test.py')]
+
+
+def _InferImportNameFromFile(python_file):
+ """Given a file, infer the import name for that file.
+
+ Example: /usr/foo/bar/baz.py -> baz.
+
+ Args:
+ python_file: path to the Python file, ostensibly to import later.
+
+ Returns:
+ The module name for the given file.
+ """
+ return os.path.splitext(os.path.basename(python_file))[0]
+
+
+def DispatchPythonTests(options):
+ """Dispatches the Python tests. If there are multiple devices, use sharding.
+
+ Args:
+ options: command line options.
+
+ Returns:
+ A list of test results.
+ """
+
+ attached_devices = android_commands.GetAttachedDevices()
+ if not attached_devices:
+ raise FatalTestException('You have no devices attached or visible!')
+ if options.device:
+ attached_devices = [options.device]
+
+ test_collection = TestInfoCollection()
+ all_tests = _GetAllTests(options.python_test_root, options.official_build)
+ test_collection.AddTests(all_tests)
+ test_names = [t.qualified_name for t in all_tests]
+ logging.debug('All available tests: ' + str(test_names))
+
+ available_tests = test_collection.GetAvailableTests(
+ options.annotation, options.test_filter)
+
+ if not available_tests:
+ logging.warning('No Python tests to run with current args.')
+ return TestResults()
+
+ available_tests *= options.number_of_runs
+ test_names = [t.qualified_name for t in available_tests]
+ logging.debug('Final list of tests to run: ' + str(test_names))
+
+ # Copy files to each device before running any tests.
+ for device_id in attached_devices:
+ logging.debug('Pushing files to device %s', device_id)
+ apks = [apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)]
+ test_files_copier = run_java_tests.TestRunner(options, device_id,
+ None, False, 0, apks, [])
+ test_files_copier.CopyTestFilesOnce()
+
+ # Actually run the tests.
+ if len(attached_devices) > 1 and options.wait_for_debugger:
+    logging.warning('Debugger cannot be sharded; '
+                    'using first available device.')
+ attached_devices = attached_devices[:1]
+ logging.debug('Running Python tests')
+ sharder = PythonTestSharder(attached_devices, available_tests, options)
+ test_results = sharder.RunShardedTests()
+
+ return test_results
+
+
+def _GetTestModules(python_test_root, is_official_build):
+ """Retrieve a sorted list of pythonDrivenTests.
+
+ Walks the location of pythonDrivenTests, imports them, and provides the list
+ of imported modules to the caller.
+
+ Args:
+ python_test_root: the path to walk, looking for pythonDrivenTests
+ is_official_build: whether to run only those tests marked 'official'
+
+ Returns:
+ A list of Python modules which may have zero or more tests.
+ """
+ # By default run all python tests under pythonDrivenTests.
+ python_test_file_list = []
+ for root, _, files in os.walk(python_test_root):
+ if (root.endswith('pythonDrivenTests')
+ or (is_official_build
+ and root.endswith('pythonDrivenTests/official'))):
+ python_test_file_list += _GetPythonFiles(root, files)
+ python_test_file_list.sort()
+
+ test_module_list = [_GetModuleFromFile(test_file)
+ for test_file in python_test_file_list]
+ return test_module_list
+
+
+def _GetModuleFromFile(python_file):
+ """Gets the module associated with a file by importing it.
+
+ Args:
+ python_file: file to import
+
+ Returns:
+ The module object.
+ """
+ sys.path.append(os.path.dirname(python_file))
+ import_name = _InferImportNameFromFile(python_file)
+ return __import__(import_name)
+
+
+def _GetTestsFromClass(test_class):
+ """Create a list of test objects for each test method on this class.
+
+ Test methods are methods on the class which begin with 'test'.
+
+ Args:
+ test_class: class object which contains zero or more test methods.
+
+ Returns:
+ A list of test objects, each of which is bound to one test.
+ """
+ test_names = [m for m in dir(test_class)
+ if _IsTestMethod(m, test_class)]
+ return map(test_class, test_names)
+
+
+def _GetTestClassesFromModule(test_module):
+ tests = []
+ for name in dir(test_module):
+ attr = getattr(test_module, name)
+ if _IsTestClass(attr):
+ tests.extend(_GetTestsFromClass(attr))
+ return tests
+
+
+def _IsTestClass(test_class):
+ return (type(test_class) is types.TypeType and
+ issubclass(test_class, python_test_base.PythonTestBase) and
+ test_class is not python_test_base.PythonTestBase)
+
+
+def _IsTestMethod(attrname, test_case_class):
+ """Checks whether this is a valid test method.
+
+ Args:
+ attrname: the method name.
+ test_case_class: the test case class.
+
+ Returns:
+ True if test_case_class.'attrname' is callable and it starts with 'test';
+ False otherwise.
+ """
+ attr = getattr(test_case_class, attrname)
+ return callable(attr) and attrname.startswith('test')
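+
+
+# Example (illustrative): given a module defining
+#
+#   class ExampleTest(python_test_base.PythonTestBase):
+#     def testOne(self): ...
+#     def testTwo(self): ...
+#
+# _GetTestClassesFromModule returns two ExampleTest instances, one bound to
+# each test method via its test_name.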
+
+
+def _GetAllTests(test_root, is_official_build):
+ """Retrieve a list of Python test modules and their respective methods.
+
+ Args:
+ test_root: path which contains Python-driven test files
+ is_official_build: whether this is an official build
+
+ Returns:
+ List of test case objects for all available test methods.
+ """
+ if not test_root:
+ return []
+ all_tests = []
+ test_module_list = _GetTestModules(test_root, is_official_build)
+ for module in test_module_list:
+ all_tests.extend(_GetTestClassesFromModule(module))
+ return all_tests
diff --git a/media/webrtc/trunk/build/android/pylib/run_tests_helper.py b/media/webrtc/trunk/build/android/pylib/run_tests_helper.py
new file mode 100644
index 000000000..15e5d5381
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/run_tests_helper.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper functions common to native, java and python test runners."""
+
+import logging
+import os
+
+
+def GetExpectations(file_name):
+ """Returns a list of test names in the |file_name| test expectations file."""
+ if not file_name or not os.path.exists(file_name):
+ return []
+ return [x for x in [x.strip() for x in file(file_name).readlines()]
+ if x and x[0] != '#']
+
+
+def SetLogLevel(verbose_count):
+ """Sets log level as |verbose_count|."""
+ log_level = logging.WARNING # Default.
+ if verbose_count == 1:
+ log_level = logging.INFO
+ elif verbose_count >= 2:
+ log_level = logging.DEBUG
+ logging.getLogger().setLevel(log_level)
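+
+
+# Example (illustrative): map -v/-vv command line flags to log levels:
+#
+#   SetLogLevel(options.verbose_count)  # 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG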
diff --git a/media/webrtc/trunk/build/android/pylib/sharded_tests_queue.py b/media/webrtc/trunk/build/android/pylib/sharded_tests_queue.py
new file mode 100644
index 000000000..9e28e2c48
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/sharded_tests_queue.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""A module that contains a queue for running sharded tests."""
+
+import multiprocessing
+
+
+class ShardedTestsQueue(object):
+ """A queue for managing pending tests across different runners.
+
+ This class should only be used when sharding.
+
+ Attributes:
+ num_devices: an integer; the number of attached Android devices.
+    tests_queue: a multiprocessing.Queue that holds the tests to run,
+      followed by one stop sentinel per device.
+ """
+ _STOP_SENTINEL = 'STOP' # sentinel value for iter()
+
+ def __init__(self, num_devices, tests):
+ self.num_devices = num_devices
+ self.tests_queue = multiprocessing.Queue()
+ for test in tests:
+ self.tests_queue.put(test)
+ for _ in xrange(self.num_devices):
+ self.tests_queue.put(ShardedTestsQueue._STOP_SENTINEL)
+
+ def __iter__(self):
+ """Returns an iterator with the test cases."""
+ return iter(self.tests_queue.get, ShardedTestsQueue._STOP_SENTINEL)
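+
+
+# A minimal sketch (illustrative): each worker iterates until it pulls a
+# sentinel, so all devices drain the same shared queue:
+#
+#   queue = ShardedTestsQueue(num_devices=2, tests=['testA', 'testB'])
+#   for test in queue:     # in a worker process
+#     RunSingleTest(test)  # hypothetical helper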
diff --git a/media/webrtc/trunk/build/android/pylib/single_test_runner.py b/media/webrtc/trunk/build/android/pylib/single_test_runner.py
new file mode 100644
index 000000000..a680c68f1
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/single_test_runner.py
@@ -0,0 +1,343 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import glob
+import logging
+import os
+import sys
+
+from base_test_runner import BaseTestRunner
+import debug_info
+import constants
+import perf_tests_helper
+import run_tests_helper
+from test_package_apk import TestPackageApk
+from test_package_executable import TestPackageExecutable
+from test_result import TestResults
+
+
+class SingleTestRunner(BaseTestRunner):
+ """Single test suite attached to a single device.
+
+ Args:
+ device: Device to run the tests.
+ test_suite: A specific test suite to run, empty to run all.
+ gtest_filter: A gtest_filter flag.
+ test_arguments: Additional arguments to pass to the test binary.
+ timeout: Timeout for each test.
+ rebaseline: Whether or not to run tests in isolation and update the filter.
+    performance_test: Whether or not this is a performance test.
+ cleanup_test_files: Whether or not to cleanup test files on device.
+    tool_name: Name of the Valgrind tool.
+ shard_index: index number of the shard on which the test suite will run.
+ dump_debug_info: Whether or not to dump debug information.
+ build_type: 'Release' or 'Debug'.
+ """
+
+ def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
+ rebaseline, performance_test, cleanup_test_files, tool_name,
+ shard_index, dump_debug_info, fast_and_loose, build_type):
+ BaseTestRunner.__init__(self, device, tool_name, shard_index, build_type)
+ self._running_on_emulator = self.device.startswith('emulator')
+ self._gtest_filter = gtest_filter
+ self._test_arguments = test_arguments
+ self.test_results = TestResults()
+ if dump_debug_info:
+ self.dump_debug_info = debug_info.GTestDebugInfo(self.adb, device,
+ os.path.basename(test_suite), gtest_filter)
+ else:
+ self.dump_debug_info = None
+ self.fast_and_loose = fast_and_loose
+
+ logging.warning('Test suite: ' + test_suite)
+ if os.path.splitext(test_suite)[1] == '.apk':
+ self.test_package = TestPackageApk(self.adb, device,
+ test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
+ self.tool, self.dump_debug_info)
+ else:
+ self.test_package = TestPackageExecutable(
+ self.adb, device,
+ test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
+ self.tool, self.dump_debug_info)
+ self._performance_test_setup = None
+ if performance_test:
+ self._performance_test_setup = perf_tests_helper.PerfTestSetup(self.adb)
+
+ def _TestSuiteRequiresMockTestServer(self):
+ """Returns True if the test suite requires mock test server."""
+ return False
+ # TODO(yfriedman): Disabled because of flakiness.
+ # (self.test_package.test_suite_basename == 'unit_tests' or
+ # self.test_package.test_suite_basename == 'net_unittests' or
+ # False)
+
+ def _GetFilterFileName(self):
+ """Returns the filename of gtest filter."""
+ return os.path.join(sys.path[0], 'gtest_filter',
+ self.test_package.test_suite_basename + '_disabled')
+
+ def _GetAdditionalEmulatorFilterName(self):
+ """Returns the filename of additional gtest filter for emulator."""
+ return os.path.join(sys.path[0], 'gtest_filter',
+ self.test_package.test_suite_basename +
+ '_emulator_additional_disabled')
+
+ def GetDisabledTests(self):
+ """Returns a list of disabled tests.
+
+ Returns:
+ A list of disabled tests obtained from gtest_filter/test_suite_disabled.
+ """
+ disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName())
+ if self._running_on_emulator:
+ # Append emulator's filter file.
+ disabled_tests.extend(run_tests_helper.GetExpectations(
+ self._GetAdditionalEmulatorFilterName()))
+ return disabled_tests
+
+ def UpdateFilter(self, failed_tests):
+ """Updates test_suite_disabled file with the new filter (deletes if empty).
+
+    If running on an emulator, only the failed tests that are not already in
+    the normal filter (returned by _GetFilterFileName()) are written to the
+    emulator's additional filter file.
+
+ Args:
+ failed_tests: A sorted list of failed tests.
+ """
+ disabled_tests = []
+ if not self._running_on_emulator:
+ filter_file_name = self._GetFilterFileName()
+ else:
+ filter_file_name = self._GetAdditionalEmulatorFilterName()
+ disabled_tests.extend(
+ run_tests_helper.GetExpectations(self._GetFilterFileName()))
+ logging.info('About to update emulator\'s additional filter (%s).'
+ % filter_file_name)
+
+ new_failed_tests = []
+ if failed_tests:
+ for test in failed_tests:
+ if test.name not in disabled_tests:
+ new_failed_tests.append(test.name)
+
+ if not new_failed_tests:
+ if os.path.exists(filter_file_name):
+ os.unlink(filter_file_name)
+ return
+
+    with open(filter_file_name, 'w') as filter_file:
+      if self._running_on_emulator:
+        filter_file.write('# Additional list of suppressions from emulator\n')
+      else:
+        filter_file.write('# List of suppressions\n')
+      filter_file.write('# This file was automatically generated by %s\n'
+                        % sys.argv[0])
+      filter_file.write('\n'.join(sorted(new_failed_tests)))
+      filter_file.write('\n')
+
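+  # A minimal sketch (not used by the runner) of how a filter file written by
+  # UpdateFilter() could be read back; the real reader is
+  # run_tests_helper.GetExpectations(). Only the format written above is
+  # assumed: '#' comment lines, then one test name per line.
+  @staticmethod
+  def _ParseFilterFileSketch(path):
+    with open(path) as f:
+      return [line.strip() for line in f
+              if line.strip() and not line.startswith('#')]
+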
+ def GetDataFilesForTestSuite(self):
+ """Returns a list of data files/dirs needed by the test suite."""
+    # Ideally, we'd just push all test data. However, it totals more than
+    # 100MB, and many of the files are not relevant (some are used only by
+    # browser_tests, others by features that are not supported, etc.).
+ if self.test_package.test_suite_basename in ['base_unittests',
+ 'sql_unittests',
+ 'unit_tests']:
+ test_files = [
+ 'base/data/file_util_unittest',
+ 'base/data/json/bom_feff.json',
+ 'chrome/test/data/download-test1.lib',
+ 'chrome/test/data/extensions/bad_magic.crx',
+ 'chrome/test/data/extensions/good.crx',
+ 'chrome/test/data/extensions/icon1.png',
+ 'chrome/test/data/extensions/icon2.png',
+ 'chrome/test/data/extensions/icon3.png',
+ 'chrome/test/data/extensions/allow_silent_upgrade/',
+ 'chrome/test/data/extensions/app/',
+ 'chrome/test/data/extensions/bad/',
+ 'chrome/test/data/extensions/effective_host_permissions/',
+ 'chrome/test/data/extensions/empty_manifest/',
+ 'chrome/test/data/extensions/good/Extensions/',
+ 'chrome/test/data/extensions/manifest_tests/',
+ 'chrome/test/data/extensions/page_action/',
+ 'chrome/test/data/extensions/permissions/',
+ 'chrome/test/data/extensions/script_and_capture/',
+ 'chrome/test/data/extensions/unpacker/',
+ 'chrome/test/data/bookmarks/',
+ 'chrome/test/data/components/',
+ 'chrome/test/data/extensions/json_schema_test.js',
+ 'chrome/test/data/History/',
+ 'chrome/test/data/json_schema_validator/',
+ 'chrome/test/data/pref_service/',
+ 'chrome/test/data/serializer_nested_test.js',
+ 'chrome/test/data/serializer_test.js',
+ 'chrome/test/data/serializer_test_nowhitespace.js',
+ 'chrome/test/data/top_sites/',
+ 'chrome/test/data/web_app_info/',
+ 'chrome/test/data/web_database',
+ 'chrome/test/data/webui/',
+ 'chrome/test/data/zip',
+ 'chrome/third_party/mock4js/',
+ 'content/browser/gpu/software_rendering_list.json',
+ 'net/data/cache_tests/insert_load1',
+ 'net/data/cache_tests/dirty_entry5',
+ 'net/data/ssl/certificates/',
+ 'ui/base/test/data/data_pack_unittest',
+ ]
+ if self.test_package.test_suite_basename == 'unit_tests':
+ test_files += ['chrome/test/data/simple_open_search.xml']
+      # The following are spell check data. For now, only list the data under
+      # third_party/hunspell_dictionaries that are used by unit tests.
+ old_cwd = os.getcwd()
+ os.chdir(constants.CHROME_DIR)
+ test_files += glob.glob('third_party/hunspell_dictionaries/*.bdic')
+ os.chdir(old_cwd)
+ return test_files
+ elif self.test_package.test_suite_basename == 'net_unittests':
+ return [
+ 'net/data/cache_tests',
+ 'net/data/filter_unittests',
+ 'net/data/ftp',
+ 'net/data/proxy_resolver_v8_unittest',
+ 'net/data/ssl/certificates',
+ 'net/data/url_request_unittest/',
+ 'net/data/proxy_script_fetcher_unittest'
+ ]
+ elif self.test_package.test_suite_basename == 'ui_tests':
+ return [
+ 'chrome/test/data/dromaeo',
+ 'chrome/test/data/json2.js',
+ 'chrome/test/data/sunspider',
+ 'chrome/test/data/v8_benchmark',
+ 'chrome/test/perf/sunspider_uitest.js',
+ 'chrome/test/perf/v8_benchmark_uitest.js',
+ ]
+ elif self.test_package.test_suite_basename == 'page_cycler_tests':
+ data = [
+ 'tools/page_cycler',
+ 'data/page_cycler',
+ ]
+ for d in data:
+ if not os.path.exists(d):
+ raise Exception('Page cycler data not found.')
+ return data
+ elif self.test_package.test_suite_basename == 'webkit_unit_tests':
+ return [
+ 'third_party/WebKit/Source/WebKit/chromium/tests/data',
+ ]
+ elif self.test_package.test_suite_basename == 'content_unittests':
+ return [
+ 'content/test/data/gpu/webgl_conformance_test_expectations.txt',
+ 'net/data/ssl/certificates/',
+ 'webkit/data/dom_storage/webcore_test_database.localstorage',
+ 'third_party/hyphen/hyph_en_US.dic',
+ ]
+ elif self.test_package.test_suite_basename == 'media_unittests':
+ return [
+ 'media/test/data',
+ ]
+ return []
+
+ def LaunchHelperToolsForTestSuite(self):
+ """Launches helper tools for the test suite.
+
+ Sometimes one test may need to run some helper tools first in order to
+ successfully complete the test.
+ """
+ if self._TestSuiteRequiresMockTestServer():
+ self.LaunchChromeTestServerSpawner()
+
+ def StripAndCopyFiles(self):
+ """Strips and copies the required data files for the test suite."""
+ self.test_package.StripAndCopyExecutable()
+ self.test_package.PushDataAndPakFiles()
+ self.tool.CopyFiles()
+ test_data = self.GetDataFilesForTestSuite()
+ if test_data and not self.fast_and_loose:
+ # Make sure SD card is ready.
+ self.adb.WaitForSdCardReady(20)
+ for data in test_data:
+ self.CopyTestData([data], self.adb.GetExternalStorage())
+
+ def RunTestsWithFilter(self):
+ """Runs a tests via a small, temporary shell script."""
+ self.test_package.CreateTestRunnerScript(self._gtest_filter,
+ self._test_arguments)
+ self.test_results = self.test_package.RunTestsAndListResults()
+
+ def RebaselineTests(self):
+ """Runs all available tests, restarting in case of failures."""
+ if self._gtest_filter:
+ all_tests = set(self._gtest_filter.split(':'))
+ else:
+ all_tests = set(self.test_package.GetAllTests())
+ failed_results = set()
+ executed_results = set()
+ while True:
+ executed_names = set([f.name for f in executed_results])
+ self._gtest_filter = ':'.join(all_tests - executed_names)
+ self.RunTestsWithFilter()
+ failed_results.update(self.test_results.crashed,
+ self.test_results.failed)
+ executed_results.update(self.test_results.crashed,
+ self.test_results.failed,
+ self.test_results.ok)
+ executed_names = set([f.name for f in executed_results])
+ logging.info('*' * 80)
+ logging.info(self.device)
+ logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
+ str(len(all_tests)))
+ logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
+ str([f.name for f in failed_results]))
+ logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' +
+ str(all_tests - executed_names))
+ logging.info('*' * 80)
+ if executed_names == all_tests:
+ break
+ self.test_results = TestResults.FromRun(
+ ok=list(executed_results - failed_results),
+ failed=list(failed_results))
+
+ def RunTests(self):
+ """Runs all tests (in rebaseline mode, runs each test in isolation).
+
+ Returns:
+ A TestResults object.
+ """
+ if self.test_package.rebaseline:
+ self.RebaselineTests()
+ else:
+ if not self._gtest_filter:
+ self._gtest_filter = ('-' + ':'.join(self.GetDisabledTests()) + ':' +
+ ':'.join(['*.' + x + '*' for x in
+ self.test_package.GetDisabledPrefixes()]))
+ self.RunTestsWithFilter()
+ return self.test_results
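+
+  # For illustration: with disabled tests ['FooTest.Bar'] and the default
+  # disabled prefixes, the filter built above is
+  #   '-FooTest.Bar:*.DISABLED_*:*.FLAKY_*:*.FAILS_*'
+  # i.e. a gtest negative filter that excludes every known-bad test.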
+
+ def SetUp(self):
+ """Sets up necessary test enviroment for the test suite."""
+ super(SingleTestRunner, self).SetUp()
+ self.adb.ClearApplicationState(constants.CHROME_PACKAGE)
+ if self._performance_test_setup:
+ self._performance_test_setup.SetUp()
+ if self.dump_debug_info:
+ self.dump_debug_info.StartRecordingLog(True)
+ self.StripAndCopyFiles()
+ self.LaunchHelperToolsForTestSuite()
+ self.tool.SetupEnvironment()
+
+ def TearDown(self):
+ """Cleans up the test enviroment for the test suite."""
+ self.tool.CleanUpEnvironment()
+ if self.test_package.cleanup_test_files:
+ self.adb.RemovePushedFiles()
+ if self.dump_debug_info:
+ self.dump_debug_info.StopRecordingLog()
+ if self._performance_test_setup:
+ self._performance_test_setup.TearDown()
+ if self.dump_debug_info:
+ self.dump_debug_info.ArchiveNewCrashFiles()
+ super(SingleTestRunner, self).TearDown()
diff --git a/media/webrtc/trunk/build/android/pylib/test_info_collection.py b/media/webrtc/trunk/build/android/pylib/test_info_collection.py
new file mode 100644
index 000000000..fc4e80694
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/test_info_collection.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing information about the python-driven tests."""
+
+import logging
+import os
+
+import tests_annotations
+
+
+class TestInfo(object):
+ """An object containing and representing a test function, plus metadata."""
+
+ def __init__(self, runnable, set_up=None, tear_down=None):
+ # The actual test function/method.
+ self.runnable = runnable
+ # Qualified name of test function/method (e.g. FooModule.testBar).
+ self.qualified_name = self._GetQualifiedName(runnable)
+    # setUp and tearDown functions, if any.
+ self.set_up = set_up
+ self.tear_down = tear_down
+
+ def _GetQualifiedName(self, runnable):
+ """Helper method to infer a runnable's name and module name.
+
+ Many filters and lists presuppose a format of module_name.testMethodName.
+ To make this easy on everyone, we use some reflection magic to infer this
+ name automatically.
+
+ Args:
+ runnable: the test method to get the qualified name for
+
+ Returns:
+ qualified name for this runnable, incl. module name and method name.
+ """
+ runnable_name = runnable.__name__
+ # See also tests_annotations.
+ module_name = os.path.splitext(
+ os.path.basename(runnable.__globals__['__file__']))[0]
+ return '.'.join([module_name, runnable_name])
+
+ def __str__(self):
+ return self.qualified_name
+
+
+class TestInfoCollection(object):
+ """A collection of TestInfo objects which facilitates filtering."""
+
+ def __init__(self):
+ """Initialize a new TestInfoCollection."""
+ # Master list of all valid tests.
+ self.all_tests = []
+
+ def AddTests(self, test_infos):
+ """Adds a set of tests to this collection.
+
+ The user may then retrieve them, optionally according to criteria, via
+ GetAvailableTests().
+
+ Args:
+ test_infos: a list of TestInfos representing test functions/methods.
+ """
+ self.all_tests = test_infos
+
+ def GetAvailableTests(self, annotation, name_filter):
+ """Get a collection of TestInfos which match the supplied criteria.
+
+ Args:
+ annotation: annotation which tests must match, if any
+ name_filter: name filter which tests must match, if any
+
+ Returns:
+ List of available tests.
+ """
+ available_tests = self.all_tests
+
+ # Filter out tests which match neither the requested annotation, nor the
+ # requested name filter, if any.
+ available_tests = [t for t in available_tests if
+ self._AnnotationIncludesTest(t, annotation)]
+ if annotation and len(annotation) == 1 and annotation[0] == 'SmallTest':
+ tests_without_annotation = [
+ t for t in self.all_tests if
+ not tests_annotations.AnnotatedFunctions.GetTestAnnotations(
+ t.qualified_name)]
+ test_names = [t.qualified_name for t in tests_without_annotation]
+ logging.warning('The following tests do not contain any annotation. '
+ 'Assuming "SmallTest":\n%s',
+ '\n'.join(test_names))
+ available_tests += tests_without_annotation
+ available_tests = [t for t in available_tests if
+ self._NameFilterIncludesTest(t, name_filter)]
+
+ return available_tests
+
+ def _AnnotationIncludesTest(self, test_info, annotation_filter_list):
+ """Checks whether a given test represented by test_info matches annotation.
+
+ Args:
+ test_info: TestInfo object representing the test
+ annotation_filter_list: list of annotation filters to match (e.g. Smoke)
+
+ Returns:
+ True if no annotation was supplied or the test matches; false otherwise.
+ """
+ if not annotation_filter_list:
+ return True
+ for annotation_filter in annotation_filter_list:
+ filters = annotation_filter.split('=')
+ if len(filters) == 2:
+ key = filters[0]
+ value_list = filters[1].split(',')
+ for value in value_list:
+ if tests_annotations.AnnotatedFunctions.IsAnnotated(
+ key + ':' + value, test_info.qualified_name):
+ return True
+ elif tests_annotations.AnnotatedFunctions.IsAnnotated(
+ annotation_filter, test_info.qualified_name):
+ return True
+ return False
+
+ def _NameFilterIncludesTest(self, test_info, name_filter):
+ """Checks whether a name filter matches a given test_info's method name.
+
+ This is a case-sensitive, substring comparison: 'Foo' will match methods
+ Foo.testBar and Bar.testFoo. 'foo' would not match either.
+
+ Args:
+ test_info: TestInfo object representing the test
+ name_filter: substring to check for in the qualified name of the test
+
+ Returns:
+ True if no name filter supplied or it matches; False otherwise.
+ """
+ return not name_filter or name_filter in test_info.qualified_name
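+
+
+# A minimal usage sketch (illustrative only; the real driver lives in
+# run_python_tests). The test function here is hypothetical.
+def _ExampleUsage():
+  def testFoo():
+    pass
+  collection = TestInfoCollection()
+  collection.AddTests([TestInfo(testFoo)])
+  # Tests matching the 'Smoke' annotation whose qualified name contains
+  # 'Foo'; either filter may be empty to match everything.
+  return collection.GetAvailableTests(['Smoke'], 'Foo')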
diff --git a/media/webrtc/trunk/build/android/pylib/test_options_parser.py b/media/webrtc/trunk/build/android/pylib/test_options_parser.py
new file mode 100644
index 000000000..ee00f1fff
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/test_options_parser.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Parses options for the instrumentation tests."""
+
+import constants
+import optparse
+import os
+import sys
+
+_SDK_OUT_DIR = os.path.join(constants.CHROME_DIR, 'out')
+
+
+def AddBuildTypeOption(option_parser):
+ """Decorates OptionParser with build type option."""
+ default_build_type = 'Debug'
+ if 'BUILDTYPE' in os.environ:
+ default_build_type = os.environ['BUILDTYPE']
+ option_parser.add_option('--debug', action='store_const', const='Debug',
+ dest='build_type', default=default_build_type,
+ help='If set, run test suites under out/Debug. '
+ 'Default is env var BUILDTYPE or Debug')
+ option_parser.add_option('--release', action='store_const', const='Release',
+ dest='build_type',
+ help='If set, run test suites under out/Release. '
+ 'Default is env var BUILDTYPE or Debug.')
+
+def AddInstallAPKOption(option_parser):
+ """Decorates OptionParser with apk option used to install the APK."""
+ option_parser.add_option('--apk',
+ help=('The name of the apk containing the '
+                                 'application (with the .apk extension).'))
+ option_parser.add_option('--apk_package',
+ help=('The package name used by the apk containing '
+ 'the application.'))
+
+def AddTestRunnerOptions(option_parser, default_timeout=60):
+ """Decorates OptionParser with options applicable to all tests."""
+
+ option_parser.add_option('-t', dest='timeout',
+ help='Timeout to wait for each test',
+ type='int',
+ default=default_timeout)
+ option_parser.add_option('-c', dest='cleanup_test_files',
+ help='Cleanup test files on the device after run',
+ action='store_true')
+ option_parser.add_option('-v',
+ '--verbose',
+ dest='verbose_count',
+ default=0,
+ action='count',
+ help='Verbose level (multiple times for more)')
+ profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
+ 'traceview']
+ option_parser.add_option('--profiler', dest='profilers', action='append',
+ choices=profilers,
+ help='Profiling tool to run during test. '
+ 'Pass multiple times to run multiple profilers. '
+ 'Available profilers: %s' % profilers)
+ option_parser.add_option('--tool',
+ dest='tool',
+ help='Run the test under a tool '
+ '(use --tool help to list them)')
+ AddBuildTypeOption(option_parser)
+
+
+def AddInstrumentationOptions(option_parser):
+ """Decorates OptionParser with instrumentation tests options."""
+
+ AddTestRunnerOptions(option_parser)
+ option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
+ action='store_true', help='Wait for debugger.')
+ option_parser.add_option('-I', dest='install_apk', help='Install APK.',
+ action='store_true')
+ option_parser.add_option('-f', '--test_filter',
+ help='Test filter (if not fully qualified, '
+ 'will run all matches).')
+ option_parser.add_option('-A', '--annotation', dest='annotation_str',
+ help=('Run only tests with any of the given '
+ 'annotations. '
+ 'An annotation can be either a key or a '
+ 'key-values pair. '
+ 'A test that has no annotation is '
+ 'considered "SmallTest".'))
+ option_parser.add_option('-j', '--java_only', action='store_true',
+ help='Run only the Java tests.')
+ option_parser.add_option('-p', '--python_only', action='store_true',
+ help='Run only the Python tests.')
+ option_parser.add_option('-n', '--run_count', type='int',
+ dest='number_of_runs', default=1,
+ help=('How many times to run each test, regardless '
+ 'of the result. (Default is 1)'))
+ option_parser.add_option('--test-apk', dest='test_apk',
+ help=('The name of the apk containing the tests '
+ '(without the .apk extension). For SDK '
+ 'builds, the apk name without the debug '
+                                 'suffix (for example, ContentShellTest).'))
+ option_parser.add_option('--screenshot', dest='screenshot_failures',
+ action='store_true',
+ help='Capture screenshots of test failures')
+ option_parser.add_option('--save-perf-json', action='store_true',
+ help='Saves the JSON file for each UI Perf test.')
+ option_parser.add_option('--shard_retries', type=int, default=1,
+ help=('Number of times to retry each failure when '
+ 'sharding.'))
+ option_parser.add_option('--official-build', help='Run official build tests.')
+ option_parser.add_option('--device',
+ help='Serial number of device we should use.')
+ option_parser.add_option('--python_test_root',
+ help='Root of the python-driven tests.')
+
+def ValidateInstrumentationOptions(option_parser, options, args):
+ """Validate options/arguments and populate options with defaults."""
+ if len(args) > 1:
+ option_parser.print_help(sys.stderr)
+ option_parser.error('Unknown arguments: %s' % args[1:])
+ if options.java_only and options.python_only:
+ option_parser.error('Options java_only (-j) and python_only (-p) '
+ 'are mutually exclusive.')
+
+ options.run_java_tests = True
+ options.run_python_tests = True
+ if options.java_only:
+ options.run_python_tests = False
+ elif options.python_only:
+ options.run_java_tests = False
+
+ # In case of SDK Build, the jars and apks have a -debug suffix.
+ options.test_apk_path = os.path.join(_SDK_OUT_DIR,
+ options.build_type,
+ constants.SDK_BUILD_APKS_DIR,
+ '%s-debug.apk' % options.test_apk)
+ options.test_apk_jar_path = os.path.join(_SDK_OUT_DIR,
+ options.build_type,
+ constants.SDK_BUILD_TEST_JAVALIB_DIR,
+ '%s-debug.jar' % options.test_apk)
+ if options.annotation_str:
+ options.annotation = options.annotation_str.split()
+ elif options.test_filter:
+ options.annotation = []
+ else:
+ options.annotation = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']
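+
+
+# Typical wiring (an illustrative sketch, not part of the harness): build a
+# parser, parse, validate, then consume derived fields such as
+# options.test_apk_path.
+def _ExampleParse(argv):
+  parser = optparse.OptionParser()
+  AddInstrumentationOptions(parser)
+  options, args = parser.parse_args(argv)
+  ValidateInstrumentationOptions(parser, options, args)
+  return options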
diff --git a/media/webrtc/trunk/build/android/pylib/test_package.py b/media/webrtc/trunk/build/android/pylib/test_package.py
new file mode 100644
index 000000000..a47ed72d8
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/test_package.py
@@ -0,0 +1,200 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+import re
+import os
+
+import constants
+from perf_tests_helper import PrintPerfResult
+from pylib import pexpect
+from test_result import BaseTestResult, TestResults
+
+
+# TODO(bulach): TestPackage, TestPackageExecutable and
+# TestPackageApk are a work in progress related to making the native tests
+# run as a NDK-app from an APK rather than a stand-alone executable.
+class TestPackage(object):
+ """A helper base class for both APK and stand-alone executables.
+
+ Args:
+ adb: ADB interface the tests are using.
+ device: Device to run the tests.
+ test_suite: A specific test suite to run, empty to run all.
+ timeout: Timeout for each test.
+ rebaseline: Whether or not to run tests in isolation and update the filter.
+    performance_test: Whether or not this is a performance test.
+ cleanup_test_files: Whether or not to cleanup test files on device.
+ tool: Name of the Valgrind tool.
+ dump_debug_info: A debug_info object.
+ """
+
+ def __init__(self, adb, device, test_suite, timeout, rebaseline,
+ performance_test, cleanup_test_files, tool, dump_debug_info):
+ self.adb = adb
+ self.device = device
+ self.test_suite_full = test_suite
+ self.test_suite = os.path.splitext(test_suite)[0]
+ self.test_suite_basename = self._GetTestSuiteBaseName()
+ self.test_suite_dirname = os.path.dirname(
+ self.test_suite.split(self.test_suite_basename)[0])
+ self.rebaseline = rebaseline
+ self.performance_test = performance_test
+ self.cleanup_test_files = cleanup_test_files
+ self.tool = tool
+ if timeout == 0:
+ timeout = 60
+ # On a VM (e.g. chromium buildbots), this timeout is way too small.
+ if os.environ.get('BUILDBOT_SLAVENAME'):
+ timeout = timeout * 2
+ self.timeout = timeout * self.tool.GetTimeoutScale()
+ self.dump_debug_info = dump_debug_info
+
+ def _BeginGetIOStats(self):
+ """Gets I/O statistics before running test.
+
+    Returns:
+      An I/O stats object, or None if this is not a performance test.
+ """
+ initial_io_stats = None
+ # Try to get the disk I/O statistics for all performance tests.
+ if self.performance_test and not self.rebaseline:
+ initial_io_stats = self.adb.GetIoStats()
+ return initial_io_stats
+
+ def _EndGetIOStats(self, initial_io_stats):
+ """Gets I/O statistics after running test and calcuate the I/O delta.
+
+ Args:
+ initial_io_stats: I/O stats object got from _BeginGetIOStats.
+
+    Returns:
+      A string of formatted disk I/O statistics.
+ """
+ disk_io = ''
+ if self.performance_test and initial_io_stats:
+ final_io_stats = self.adb.GetIoStats()
+ for stat in final_io_stats:
+ disk_io += '\n' + PrintPerfResult(stat, stat,
+ [final_io_stats[stat] -
+ initial_io_stats[stat]],
+ stat.split('_')[1],
+ print_to_stdout=False)
+ logging.info(disk_io)
+ return disk_io
+
+ def GetDisabledPrefixes(self):
+ return ['DISABLED_', 'FLAKY_', 'FAILS_']
+
+ def _ParseGTestListTests(self, all_tests):
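+    # Input is the output of --gtest_list_tests, which looks like (assumed
+    # representative):
+    #   FooTest.
+    #     Bar
+    #     DISABLED_Baz
+    #   YOU HAVE 2 DISABLED TESTS
+    # and is flattened to ['FooTest.Bar']; tests whose names start with a
+    # disabled prefix are skipped.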
+ ret = []
+ current = ''
+ disabled_prefixes = self.GetDisabledPrefixes()
+ for test in all_tests:
+ if not test:
+ continue
+ if test[0] != ' ' and not test.endswith('.'):
+ # Ignore any lines with unexpected format.
+ continue
+ if test[0] != ' ' and test.endswith('.'):
+ current = test
+ continue
+ if 'YOU HAVE' in test:
+ break
+ test_name = test[2:]
+ if not any([test_name.startswith(x) for x in disabled_prefixes]):
+ ret += [current + test_name]
+ return ret
+
+ def PushDataAndPakFiles(self):
+ external_storage = self.adb.GetExternalStorage()
+ if (self.test_suite_basename == 'ui_unittests' or
+ self.test_suite_basename == 'unit_tests'):
+ self.adb.PushIfNeeded(
+ self.test_suite_dirname + '/chrome.pak',
+ external_storage + '/paks/chrome.pak')
+ self.adb.PushIfNeeded(
+ self.test_suite_dirname + '/locales/en-US.pak',
+ external_storage + '/paks/en-US.pak')
+ if self.test_suite_basename == 'unit_tests':
+ self.adb.PushIfNeeded(
+ self.test_suite_dirname + '/resources.pak',
+ external_storage + '/paks/resources.pak')
+ self.adb.PushIfNeeded(
+ self.test_suite_dirname + '/chrome_100_percent.pak',
+ external_storage + '/paks/chrome_100_percent.pak')
+ self.adb.PushIfNeeded(self.test_suite_dirname + '/test_data',
+ external_storage + '/test_data')
+ if self.test_suite_basename == 'content_unittests':
+ self.adb.PushIfNeeded(
+ self.test_suite_dirname + '/content_resources.pak',
+ external_storage + '/paks/content_resources.pak')
+
+ def _WatchTestOutput(self, p):
+ """Watches the test output.
+ Args:
+ p: the process generating output as created by pexpect.spawn.
+ """
+ ok_tests = []
+ failed_tests = []
+ crashed_tests = []
+ timed_out = False
+ overall_fail = False
+ re_run = re.compile('\[ RUN \] ?(.*)\r\n')
+ # APK tests rely on the PASSED tag.
+ re_passed = re.compile('\[ PASSED \] ?(.*)\r\n')
+ # Signal handlers are installed before starting tests
+ # to output the CRASHED marker when a crash happens.
+ re_crash = re.compile('\[ CRASHED \](.*)\r\n')
+ re_fail = re.compile('\[ FAILED \] ?(.*)\r\n')
+ re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
+ re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n')
+ io_stats_before = self._BeginGetIOStats()
+ try:
+ while True:
+ found = p.expect([re_run, re_passed, re_runner_fail],
+ timeout=self.timeout)
+ if found == 1: # matched PASSED.
+ break
+ if found == 2: # RUNNER_FAILED
+ logging.error('RUNNER_FAILED')
+ overall_fail = True
+ break
+ if self.dump_debug_info:
+ self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
+ full_test_name = p.match.group(1).replace('\r', '')
+ found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
+ if found == 0: # re_ok
+ if full_test_name == p.match.group(1).replace('\r', ''):
+ ok_tests += [BaseTestResult(full_test_name, p.before)]
+ continue
+ if found == 2: # re_crash
+ crashed_tests += [BaseTestResult(full_test_name, p.before)]
+ overall_fail = True
+ break
+ # The test failed.
+ failed_tests += [BaseTestResult(full_test_name, p.before)]
+ except pexpect.EOF:
+ logging.error('Test terminated - EOF')
+ except pexpect.TIMEOUT:
+ logging.error('Test terminated after %d second timeout.',
+ self.timeout)
+ timed_out = True
+ finally:
+ p.close()
+      if not self.rebaseline:
+        # _EndGetIOStats() returns a string; wrap it in a result object
+        # instead of extending the list with individual characters.
+        disk_io = self._EndGetIOStats(io_stats_before)
+        if disk_io:
+          ok_tests += [BaseTestResult('disk_io', disk_io)]
+ ret_code = self._GetGTestReturnCode()
+ if ret_code:
+ failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
+ 'pexpect.before: %s'
+ '\npexpect.after: %s'
+ % (p.before,
+ p.after))]
+ # Create TestResults and return
+ return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
+ crashed=crashed_tests, timed_out=timed_out,
+ overall_fail=overall_fail)
diff --git a/media/webrtc/trunk/build/android/pylib/test_package_apk.py b/media/webrtc/trunk/build/android/pylib/test_package_apk.py
new file mode 100644
index 000000000..42b9ade68
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/test_package_apk.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os
+import shlex
+import sys
+import tempfile
+import time
+
+import android_commands
+import constants
+from test_package import TestPackage
+from pylib import pexpect
+
+class TestPackageApk(TestPackage):
+ """A helper class for running APK-based native tests.
+
+ Args:
+ adb: ADB interface the tests are using.
+ device: Device to run the tests.
+ test_suite: A specific test suite to run, empty to run all.
+ timeout: Timeout for each test.
+ rebaseline: Whether or not to run tests in isolation and update the filter.
+    performance_test: Whether or not this is a performance test.
+ cleanup_test_files: Whether or not to cleanup test files on device.
+ tool: Name of the Valgrind tool.
+ dump_debug_info: A debug_info object.
+ """
+
+ def __init__(self, adb, device, test_suite, timeout, rebaseline,
+ performance_test, cleanup_test_files, tool,
+ dump_debug_info):
+ TestPackage.__init__(self, adb, device, test_suite, timeout,
+ rebaseline, performance_test, cleanup_test_files,
+ tool, dump_debug_info)
+
+ def _CreateTestRunnerScript(self, options):
+ command_line_file = tempfile.NamedTemporaryFile()
+ # GTest expects argv[0] to be the executable path.
+ command_line_file.write(self.test_suite_basename + ' ' + options)
+ command_line_file.flush()
+ self.adb.PushIfNeeded(command_line_file.name,
+ constants.TEST_EXECUTABLE_DIR +
+ '/chrome-native-tests-command-line')
+
+ def _GetGTestReturnCode(self):
+ return None
+
+ def _GetFifo(self):
+ # The test.fifo path is determined by:
+ # testing/android/java/src/org/chromium/native_test/
+ # ChromeNativeTestActivity.java and
+ # testing/android/native_test_launcher.cc
+ return '/data/data/org.chromium.native_test/files/test.fifo'
+
+ def _ClearFifo(self):
+ self.adb.RunShellCommand('rm -f ' + self._GetFifo())
+
+ def _WatchFifo(self, timeout, logfile=None):
+ for i in range(10):
+ if self.adb.FileExistsOnDevice(self._GetFifo()):
+ print 'Fifo created...'
+ break
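+      # Linear backoff: sleeps 0s on the first attempt, then 1s, 2s, ...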
+ time.sleep(i)
+ else:
+ raise Exception('Unable to find fifo on device %s ' % self._GetFifo())
+ args = shlex.split(self.adb.Adb()._target_arg)
+ args += ['shell', 'cat', self._GetFifo()]
+ return pexpect.spawn('adb', args, timeout=timeout, logfile=logfile)
+
+ def GetAllTests(self):
+ """Returns a list of all tests available in the test suite."""
+ self._CreateTestRunnerScript('--gtest_list_tests')
+ try:
+ self.tool.SetupEnvironment()
+ # Clear and start monitoring logcat.
+ self._ClearFifo()
+ self.adb.RunShellCommand(
+ 'am start -n '
+ 'org.chromium.native_test/'
+ 'org.chromium.native_test.ChromeNativeTestActivity')
+ # Wait for native test to complete.
+ p = self._WatchFifo(timeout=30 * self.tool.GetTimeoutScale())
+ p.expect("<<ScopedMainEntryLogger")
+ p.close()
+ finally:
+ self.tool.CleanUpEnvironment()
+ # We need to strip the trailing newline.
+ content = [line.rstrip() for line in p.before.splitlines()]
+ ret = self._ParseGTestListTests(content)
+ return ret
+
+ def CreateTestRunnerScript(self, gtest_filter, test_arguments):
+ self._CreateTestRunnerScript('--gtest_filter=%s %s' % (gtest_filter,
+ test_arguments))
+
+ def RunTestsAndListResults(self):
+ try:
+ self.tool.SetupEnvironment()
+ self._ClearFifo()
+ self.adb.RunShellCommand(
+ 'am start -n '
+ 'org.chromium.native_test/'
+ 'org.chromium.native_test.ChromeNativeTestActivity')
+ finally:
+ self.tool.CleanUpEnvironment()
+ logfile = android_commands.NewLineNormalizer(sys.stdout)
+ return self._WatchTestOutput(self._WatchFifo(timeout=10, logfile=logfile))
+
+ def StripAndCopyExecutable(self):
+ # Always uninstall the previous one (by activity name); we don't
+ # know what was embedded in it.
+ self.adb.ManagedInstall(self.test_suite_full, False,
+ package_name='org.chromium.native_test')
+
+ def _GetTestSuiteBaseName(self):
+ """Returns the base name of the test suite."""
+ # APK test suite names end with '-debug.apk'
+ return os.path.basename(self.test_suite).rsplit('-debug', 1)[0]
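+
+
+# End-to-end flow (illustrative; arguments abbreviated):
+#   package = TestPackageApk(adb, device, 'out/Release/foo-debug.apk', ...)
+#   all_tests = package.GetAllTests()
+#   package.CreateTestRunnerScript('FooTest.*', '')
+#   results = package.RunTestsAndListResults()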
diff --git a/media/webrtc/trunk/build/android/pylib/test_package_executable.py b/media/webrtc/trunk/build/android/pylib/test_package_executable.py
new file mode 100644
index 000000000..a11c768fb
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/test_package_executable.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import logging
+import os
+import shutil
+import sys
+import tempfile
+
+import cmd_helper
+import constants
+from test_package import TestPackage
+from pylib import pexpect
+
+
+class TestPackageExecutable(TestPackage):
+ """A helper class for running stand-alone executables."""
+
+ _TEST_RUNNER_RET_VAL_FILE = 'gtest_retval'
+
+ def __init__(self, adb, device, test_suite, timeout, rebaseline,
+ performance_test, cleanup_test_files, tool, dump_debug_info,
+ symbols_dir=None):
+ """
+ Args:
+ adb: ADB interface the tests are using.
+ device: Device to run the tests.
+ test_suite: A specific test suite to run, empty to run all.
+ timeout: Timeout for each test.
+ rebaseline: Whether or not to run tests in isolation and update the
+ filter.
+      performance_test: Whether or not this is a performance test.
+ cleanup_test_files: Whether or not to cleanup test files on device.
+ tool: Name of the Valgrind tool.
+ dump_debug_info: A debug_info object.
+ symbols_dir: Directory to put the stripped binaries.
+ """
+ TestPackage.__init__(self, adb, device, test_suite, timeout,
+ rebaseline, performance_test, cleanup_test_files,
+ tool, dump_debug_info)
+ self.symbols_dir = symbols_dir
+
+ def _GetGTestReturnCode(self):
+ ret = None
+ ret_code = 1 # Assume failure if we can't find it
+ ret_code_file = tempfile.NamedTemporaryFile()
+ try:
+ if not self.adb.Adb().Pull(
+ self.adb.GetExternalStorage() + '/' +
+ TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE,
+ ret_code_file.name):
+ logging.critical('Unable to pull gtest ret val file %s',
+ ret_code_file.name)
+ raise ValueError
+      ret_code = open(ret_code_file.name).read()
+ ret = int(ret_code)
+ except ValueError:
+ logging.critical('Error reading gtest ret val file %s [%s]',
+ ret_code_file.name, ret_code)
+ ret = 1
+ return ret
+
+ def _AddNativeCoverageExports(self):
+ # export GCOV_PREFIX set the path for native coverage results
+ # export GCOV_PREFIX_STRIP indicates how many initial directory
+ # names to strip off the hardwired absolute paths.
+ # This value is calculated in buildbot.sh and
+ # depends on where the tree is built.
+ # Ex: /usr/local/google/code/chrome will become
+ # /code/chrome if GCOV_PREFIX_STRIP=3
+ try:
+ depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
+ except KeyError:
+ logging.info('NATIVE_COVERAGE_DEPTH_STRIP is not defined: '
+ 'No native coverage.')
+ return ''
+ export_string = ('export GCOV_PREFIX="%s/gcov"\n' %
+ self.adb.GetExternalStorage())
+ export_string += 'export GCOV_PREFIX_STRIP=%s\n' % depth
+ return export_string
+
+ def GetAllTests(self):
+ """Returns a list of all tests available in the test suite."""
+ all_tests = self.adb.RunShellCommand(
+ '%s %s/%s --gtest_list_tests' %
+ (self.tool.GetTestWrapper(),
+ constants.TEST_EXECUTABLE_DIR,
+ self.test_suite_basename))
+ return self._ParseGTestListTests(all_tests)
+
+ def CreateTestRunnerScript(self, gtest_filter, test_arguments):
+ """Creates a test runner script and pushes to the device.
+
+ Args:
+ gtest_filter: A gtest_filter flag.
+ test_arguments: Additional arguments to pass to the test binary.
+ """
+ tool_wrapper = self.tool.GetTestWrapper()
+ sh_script_file = tempfile.NamedTemporaryFile()
+ # We need to capture the exit status from the script since adb shell won't
+ # propagate to us.
+ sh_script_file.write('cd %s\n'
+ '%s'
+ '%s %s/%s --gtest_filter=%s %s\n'
+ 'echo $? > %s' %
+ (constants.TEST_EXECUTABLE_DIR,
+ self._AddNativeCoverageExports(),
+ tool_wrapper, constants.TEST_EXECUTABLE_DIR,
+ self.test_suite_basename,
+ gtest_filter, test_arguments,
+ TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE))
+ sh_script_file.flush()
+ cmd_helper.RunCmd(['chmod', '+x', sh_script_file.name])
+ self.adb.PushIfNeeded(
+ sh_script_file.name,
+ constants.TEST_EXECUTABLE_DIR + '/chrome_test_runner.sh')
+    logging.info('Contents of the test runner script:')
+ for line in open(sh_script_file.name).readlines():
+ logging.info(' ' + line.rstrip())
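+
+  # Illustrative content of the generated script (assuming
+  # constants.TEST_EXECUTABLE_DIR is /data/local/tmp and the memcheck wrapper
+  # is in use):
+  #   cd /data/local/tmp
+  #   /data/local/tmp/valgrind/vg-chrome-wrapper.sh /data/local/tmp/foo_tests --gtest_filter=FooTest.*
+  #   echo $? > gtest_retval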
+
+ def RunTestsAndListResults(self):
+ """Runs all the tests and checks for failures.
+
+ Returns:
+ A TestResults object.
+ """
+ args = ['adb', '-s', self.device, 'shell', 'sh',
+ constants.TEST_EXECUTABLE_DIR + '/chrome_test_runner.sh']
+ logging.info(args)
+ p = pexpect.spawn(args[0], args[1:], logfile=sys.stdout)
+ return self._WatchTestOutput(p)
+
+ def StripAndCopyExecutable(self):
+ """Strips and copies the executable to the device."""
+ if self.tool.NeedsDebugInfo():
+ target_name = self.test_suite
+ else:
+ target_name = self.test_suite + '_' + self.device + '_stripped'
+ should_strip = True
+ if os.path.isfile(target_name):
+ logging.info('Found target file %s' % target_name)
+ target_mtime = os.stat(target_name).st_mtime
+ source_mtime = os.stat(self.test_suite).st_mtime
+ if target_mtime > source_mtime:
+ logging.info('Target mtime (%d) is newer than source (%d), assuming '
+ 'no change.' % (target_mtime, source_mtime))
+ should_strip = False
+
+ if should_strip:
+ logging.info('Did not find up-to-date stripped binary. Generating a '
+ 'new one (%s).' % target_name)
+ # Whenever we generate a stripped binary, copy to the symbols dir. If we
+ # aren't stripping a new binary, assume it's there.
+ if self.symbols_dir:
+ if not os.path.exists(self.symbols_dir):
+ os.makedirs(self.symbols_dir)
+ shutil.copy(self.test_suite, self.symbols_dir)
+ strip = os.environ['STRIP']
+ cmd_helper.RunCmd([strip, self.test_suite, '-o', target_name])
+ test_binary = constants.TEST_EXECUTABLE_DIR + '/' + self.test_suite_basename
+ self.adb.PushIfNeeded(target_name, test_binary)
+
+ def _GetTestSuiteBaseName(self):
+ """Returns the base name of the test suite."""
+ return os.path.basename(self.test_suite)
diff --git a/media/webrtc/trunk/build/android/pylib/test_result.py b/media/webrtc/trunk/build/android/pylib/test_result.py
new file mode 100644
index 000000000..31a546aca
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/test_result.py
@@ -0,0 +1,193 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import json
+import logging
+import os
+import time
+import traceback
+
+import buildbot_report
+import constants
+
+
+class BaseTestResult(object):
+ """A single result from a unit test."""
+
+ def __init__(self, name, log):
+ self.name = name
+ self.log = log.replace('\r', '')
+
+
+class SingleTestResult(BaseTestResult):
+ """Result information for a single test.
+
+ Args:
+ full_name: Full name of the test.
+ start_date: Date in milliseconds when the test began running.
+ dur: Duration of the test run in milliseconds.
+ log: An optional string listing any errors.
+ """
+
+ def __init__(self, full_name, start_date, dur, log=''):
+ BaseTestResult.__init__(self, full_name, log)
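+    # Instrumentation test names look like 'org.chromium.FooTest#testBar';
+    # split into class and method parts when a '#' is present.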
+ name_pieces = full_name.rsplit('#')
+ if len(name_pieces) > 1:
+ self.test_name = name_pieces[1]
+ self.class_name = name_pieces[0]
+ else:
+ self.class_name = full_name
+ self.test_name = full_name
+ self.start_date = start_date
+ self.dur = dur
+
+
+class TestResults(object):
+ """Results of a test run."""
+
+ def __init__(self):
+ self.ok = []
+ self.failed = []
+ self.crashed = []
+ self.unknown = []
+ self.timed_out = False
+ self.overall_fail = False
+
+ @staticmethod
+ def FromRun(ok=None, failed=None, crashed=None, timed_out=False,
+ overall_fail=False):
+ ret = TestResults()
+ ret.ok = ok or []
+ ret.failed = failed or []
+ ret.crashed = crashed or []
+ ret.timed_out = timed_out
+ ret.overall_fail = overall_fail
+ return ret
+
+ @staticmethod
+ def FromTestResults(results):
+ """Combines a list of results in a single TestResults object."""
+ ret = TestResults()
+ for t in results:
+ ret.ok += t.ok
+ ret.failed += t.failed
+ ret.crashed += t.crashed
+ ret.unknown += t.unknown
+ if t.timed_out:
+ ret.timed_out = True
+ if t.overall_fail:
+ ret.overall_fail = True
+ return ret
+
+ @staticmethod
+ def FromPythonException(test_name, start_date_ms, exc_info):
+ """Constructs a TestResults with exception information for the given test.
+
+ Args:
+ test_name: name of the test which raised an exception.
+ start_date_ms: the starting time for the test.
+ exc_info: exception info, ostensibly from sys.exc_info().
+
+ Returns:
+ A TestResults object with a SingleTestResult in the failed list.
+ """
+ exc_type, exc_value, exc_traceback = exc_info
+ trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
+ exc_traceback))
+ log_msg = 'Exception:\n' + trace_info
+ duration_ms = (int(time.time()) * 1000) - start_date_ms
+
+ exc_result = SingleTestResult(
+ full_name='PythonWrapper#' + test_name,
+ start_date=start_date_ms,
+ dur=duration_ms,
+ log=(str(exc_type) + ' ' + log_msg))
+
+ results = TestResults()
+ results.failed.append(exc_result)
+ return results
+
+ def _Log(self, sorted_list):
+ for t in sorted_list:
+ logging.critical(t.name)
+ if t.log:
+ logging.critical(t.log)
+
+ def GetAllBroken(self):
+ """Returns the all broken tests including failed, crashed, unknown."""
+ return self.failed + self.crashed + self.unknown
+
+ def LogFull(self, test_group, test_suite, build_type):
+ """Output broken test logs, summarize in a log file and the test output."""
+ # Output all broken tests or 'passed' if none broken.
+ logging.critical('*' * 80)
+ logging.critical('Final result')
+ if self.failed:
+ logging.critical('Failed:')
+ self._Log(sorted(self.failed))
+ if self.crashed:
+ logging.critical('Crashed:')
+ self._Log(sorted(self.crashed))
+ if self.unknown:
+ logging.critical('Unknown:')
+ self._Log(sorted(self.unknown))
+ if not self.GetAllBroken():
+ logging.critical('Passed')
+ logging.critical('*' * 80)
+
+ # Summarize in a log file, if tests are running on bots.
+ if test_group and test_suite and os.environ.get('BUILDBOT_BUILDERNAME'):
+ log_file_path = os.path.join(constants.CHROME_DIR, 'out',
+ build_type, 'test_logs')
+ if not os.path.exists(log_file_path):
+ os.mkdir(log_file_path)
+ full_file_name = os.path.join(log_file_path, test_group)
+ if not os.path.exists(full_file_name):
+ with open(full_file_name, 'w') as log_file:
+ print >> log_file, '\n%s results for %s build %s:' % (
+ test_group, os.environ.get('BUILDBOT_BUILDERNAME'),
+ os.environ.get('BUILDBOT_BUILDNUMBER'))
+ log_contents = [' %s result : %d tests ran' % (test_suite,
+ len(self.ok) +
+ len(self.failed) +
+ len(self.crashed) +
+ len(self.unknown))]
+ content_pairs = [('passed', len(self.ok)), ('failed', len(self.failed)),
+ ('crashed', len(self.crashed))]
+ for (result, count) in content_pairs:
+ if count:
+ log_contents.append(', %d tests %s' % (count, result))
+ with open(full_file_name, 'a') as log_file:
+ print >> log_file, ''.join(log_contents)
+ content = {'test_group': test_group,
+ 'ok': [t.name for t in self.ok],
+ 'failed': [t.name for t in self.failed],
+                   'crashed': [t.name for t in self.crashed],
+ 'unknown': [t.name for t in self.unknown],}
+ with open(os.path.join(log_file_path, 'results.json'), 'a') as json_file:
+ print >> json_file, json.dumps(content)
+
+ # Summarize in the test output.
+ summary_string = 'Summary:\n'
+ summary_string += 'RAN=%d\n' % (len(self.ok) + len(self.failed) +
+ len(self.crashed) + len(self.unknown))
+ summary_string += 'PASSED=%d\n' % (len(self.ok))
+ summary_string += 'FAILED=%d %s\n' % (len(self.failed),
+ [t.name for t in self.failed])
+ summary_string += 'CRASHED=%d %s\n' % (len(self.crashed),
+ [t.name for t in self.crashed])
+ summary_string += 'UNKNOWN=%d %s\n' % (len(self.unknown),
+ [t.name for t in self.unknown])
+ logging.critical(summary_string)
+ return summary_string
+
+ def PrintAnnotation(self):
+ """Print buildbot annotations for test results."""
+ if self.timed_out:
+ buildbot_report.PrintWarning()
+ elif self.failed or self.crashed or self.overall_fail:
+ buildbot_report.PrintError()
+ else:
+ print 'Step success!' # No annotation needed
diff --git a/media/webrtc/trunk/build/android/pylib/tests_annotations.py b/media/webrtc/trunk/build/android/pylib/tests_annotations.py
new file mode 100644
index 000000000..f2a183466
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/tests_annotations.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Annotations for python-driven tests."""
+
+import os
+
+
+class AnnotatedFunctions(object):
+ """A container for annotated methods."""
+ _ANNOTATED = {}
+
+ @staticmethod
+ def _AddFunction(annotation, function):
+ """Adds an annotated to function to our container.
+
+ Args:
+ annotation: the annotation string.
+ function: the function.
+ Returns:
+ The function passed in.
+ """
+ module_name = os.path.splitext(os.path.basename(
+ function.__globals__['__file__']))[0]
+ qualified_function_name = '.'.join([module_name, function.func_name])
+ function_list = AnnotatedFunctions._ANNOTATED.get(annotation, [])
+ function_list.append(qualified_function_name)
+ AnnotatedFunctions._ANNOTATED[annotation] = function_list
+ return function
+
+ @staticmethod
+ def IsAnnotated(annotation, qualified_function_name):
+ """True if function name (module.function) contains the annotation.
+
+ Args:
+ annotation: the annotation string.
+ qualified_function_name: the qualified function name.
+ Returns:
+ True if module.function contains the annotation.
+ """
+ return qualified_function_name in AnnotatedFunctions._ANNOTATED.get(
+ annotation, [])
+
+ @staticmethod
+ def GetTestAnnotations(qualified_function_name):
+ """Returns a list containing all annotations for the given function.
+
+ Args:
+ qualified_function_name: the qualified function name.
+ Returns:
+ List of all annotations for this function.
+ """
+ return [annotation
+ for annotation, tests in AnnotatedFunctions._ANNOTATED.iteritems()
+ if qualified_function_name in tests]
+
+
+# The following functions are annotations used for the python driven tests.
+def Smoke(function):
+ return AnnotatedFunctions._AddFunction('Smoke', function)
+
+
+def SmallTest(function):
+ return AnnotatedFunctions._AddFunction('SmallTest', function)
+
+
+def MediumTest(function):
+ return AnnotatedFunctions._AddFunction('MediumTest', function)
+
+
+def LargeTest(function):
+ return AnnotatedFunctions._AddFunction('LargeTest', function)
+
+
+def FlakyTest(function):
+ return AnnotatedFunctions._AddFunction('FlakyTest', function)
+
+
+def DisabledTest(function):
+ return AnnotatedFunctions._AddFunction('DisabledTest', function)
+
+
+def Feature(feature_list):
+ def _AddFeatures(function):
+ for feature in feature_list:
+ AnnotatedFunctions._AddFunction('Feature' + feature, function)
+ return AnnotatedFunctions._AddFunction('Feature', function)
+ return _AddFeatures
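+
+
+# Usage sketch (illustrative only): decorating a test registers its qualified
+# name (module.function) so the runner can later select it by annotation.
+@Smoke
+@Feature(['Sync'])
+def _exampleAnnotatedTest():
+  pass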
diff --git a/media/webrtc/trunk/build/android/pylib/valgrind_tools.py b/media/webrtc/trunk/build/android/pylib/valgrind_tools.py
new file mode 100644
index 000000000..810f6be18
--- /dev/null
+++ b/media/webrtc/trunk/build/android/pylib/valgrind_tools.py
@@ -0,0 +1,255 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Classes in this file define additional actions that need to be taken to run a
+test under some kind of runtime error detection tool.
+
+The interface is intended to be used as follows.
+
+1. For tests that simply run a native process (i.e. no activity is spawned):
+
+Call tool.CopyFiles().
+Prepend test command line with tool.GetTestWrapper().
+
+2. For tests that spawn an activity:
+
+Call tool.CopyFiles().
+Call tool.SetupEnvironment().
+Run the test as usual.
+Call tool.CleanUpEnvironment().
+"""
+
+import os.path
+import sys
+
+from constants import CHROME_DIR
+
+
+def SetChromeTimeoutScale(adb, scale):
+ """Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
+ path = '/data/local/tmp/chrome_timeout_scale'
+ if not scale or scale == 1.0:
+ # Delete if scale is None/0.0/1.0 since the default timeout scale is 1.0
+ adb.RunShellCommand('rm %s' % path)
+ else:
+ adb.SetFileContents(path, '%f' % scale)
+
+
+class BaseTool(object):
+ """A tool that does nothing."""
+
+ def GetTestWrapper(self):
+ """Returns a string that is to be prepended to the test command line."""
+ return ''
+
+ def GetUtilWrapper(self):
+ """Returns the wrapper name for the utilities.
+
+ Returns:
+ A string that is to be prepended to the command line of utility
+ processes (forwarder, etc.).
+ """
+ return ''
+
+ def CopyFiles(self):
+ """Copies tool-specific files to the device, create directories, etc."""
+ pass
+
+ def SetupEnvironment(self):
+ """Sets up the system environment for a test.
+
+ This is a good place to set system properties.
+ """
+ pass
+
+ def CleanUpEnvironment(self):
+ """Cleans up environment."""
+ pass
+
+ def GetTimeoutScale(self):
+ """Returns a multiplier that should be applied to timeout values."""
+ return 1.0
+
+ def NeedsDebugInfo(self):
+ """Whether this tool requires debug info.
+
+ Returns:
+ True if this tool can not work with stripped binaries.
+ """
+ return False
+
+
+class AddressSanitizerTool(BaseTool):
+ """AddressSanitizer tool."""
+
+ WRAPPER_PATH = '/system/bin/asanwrapper'
+
+ def __init__(self, adb):
+ self._adb = adb
+ self._wrap_properties = ['wrap.com.google.android.apps.ch',
+ 'wrap.org.chromium.native_test']
+
+ def CopyFiles(self):
+ """Copies ASan tools to the device."""
+ files = ['system/lib/libasan_preload.so',
+ 'system/bin/asanwrapper',
+ 'system/bin/asan/app_process',
+ 'system/bin/linker']
+ android_product_out = os.environ['ANDROID_PRODUCT_OUT']
+ self._adb.MakeSystemFolderWritable()
+ for f in files:
+ self._adb.PushIfNeeded(os.path.join(android_product_out, f),
+ os.path.join('/', f))
+
+ def GetTestWrapper(self):
+ return AddressSanitizerTool.WRAPPER_PATH
+
+ def GetUtilWrapper(self):
+ """Returns the wrapper for utilities, such as forwarder.
+
+ AddressSanitizer wrapper must be added to all instrumented binaries,
+ including forwarder and the like. This can be removed if such binaries
+    were built without instrumentation.
+    """
+ return AddressSanitizerTool.WRAPPER_PATH
+
+ def SetupEnvironment(self):
+ for prop in self._wrap_properties:
+ self._adb.RunShellCommand('setprop %s "logwrapper %s"' % (
+ prop, self.GetTestWrapper()))
+ SetChromeTimeoutScale(self._adb, self.GetTimeoutScale())
+
+ def CleanUpEnvironment(self):
+ for prop in self._wrap_properties:
+ self._adb.RunShellCommand('setprop %s ""' % (prop,))
+ SetChromeTimeoutScale(self._adb, None)
+
+ def GetTimeoutScale(self):
+ # Very slow startup.
+ return 20.0
+
+
+class ValgrindTool(BaseTool):
+ """Base abstract class for Valgrind tools."""
+
+ VG_DIR = '/data/local/tmp/valgrind'
+ VGLOGS_DIR = '/data/local/tmp/vglogs'
+
+ def __init__(self, adb):
+ self._adb = adb
+ # exactly 31 chars, SystemProperties::PROP_NAME_MAX
+ self._wrap_properties = ['wrap.com.google.android.apps.ch',
+ 'wrap.org.chromium.native_test']
+
+ def CopyFiles(self):
+ """Copies Valgrind tools to the device."""
+ self._adb.RunShellCommand('rm -r %s; mkdir %s' %
+ (ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
+ self._adb.RunShellCommand('rm -r %s; mkdir %s' %
+ (ValgrindTool.VGLOGS_DIR,
+ ValgrindTool.VGLOGS_DIR))
+ files = self.GetFilesForTool()
+ for f in files:
+ self._adb.PushIfNeeded(os.path.join(CHROME_DIR, f),
+ os.path.join(ValgrindTool.VG_DIR,
+ os.path.basename(f)))
+
+ def SetupEnvironment(self):
+ """Sets up device environment."""
+ self._adb.RunShellCommand('chmod 777 /data/local/tmp')
+ for prop in self._wrap_properties:
+ self._adb.RunShellCommand('setprop %s "logwrapper %s"' % (
+ prop, self.GetTestWrapper()))
+ SetChromeTimeoutScale(self._adb, self.GetTimeoutScale())
+
+ def CleanUpEnvironment(self):
+ """Cleans up device environment."""
+ for prop in self._wrap_properties:
+ self._adb.RunShellCommand('setprop %s ""' % (prop,))
+ SetChromeTimeoutScale(self._adb, None)
+
+ def GetFilesForTool(self):
+ """Returns a list of file names for the tool."""
+ raise NotImplementedError()
+
+ def NeedsDebugInfo(self):
+ """Whether this tool requires debug info.
+
+ Returns:
+ True if this tool can not work with stripped binaries.
+ """
+ return True
+
+
+class MemcheckTool(ValgrindTool):
+ """Memcheck tool."""
+
+ def __init__(self, adb):
+ super(MemcheckTool, self).__init__(adb)
+
+ def GetFilesForTool(self):
+ """Returns a list of file names for the tool."""
+ return ['tools/valgrind/android/vg-chrome-wrapper.sh',
+ 'tools/valgrind/memcheck/suppressions.txt',
+ 'tools/valgrind/memcheck/suppressions_android.txt']
+
+ def GetTestWrapper(self):
+ """Returns a string that is to be prepended to the test command line."""
+ return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
+
+ def GetTimeoutScale(self):
+ """Returns a multiplier that should be applied to timeout values."""
+    return 30.0
+
+
+class TSanTool(ValgrindTool):
+ """ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
+
+ def __init__(self, adb):
+ super(TSanTool, self).__init__(adb)
+
+ def GetFilesForTool(self):
+ """Returns a list of file names for the tool."""
+ return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
+ 'tools/valgrind/tsan/suppressions.txt',
+ 'tools/valgrind/tsan/suppressions_android.txt',
+ 'tools/valgrind/tsan/ignores.txt']
+
+ def GetTestWrapper(self):
+ """Returns a string that is to be prepended to the test command line."""
+ return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
+
+ def GetTimeoutScale(self):
+ """Returns a multiplier that should be applied to timeout values."""
+ return 30.0
+
+
+TOOL_REGISTRY = {
+  'memcheck': MemcheckTool,
+  'memcheck-renderer': MemcheckTool,
+  'tsan': TSanTool,
+  'tsan-renderer': TSanTool,
+  'asan': AddressSanitizerTool,
+}
+
+
+def CreateTool(tool_name, adb):
+ """Creates a tool with the specified tool name.
+
+ Args:
+ tool_name: Name of the tool to create.
+ adb: ADB interface the tool will use.
+ Returns:
+ A tool for the specified tool_name.
+ """
+ if not tool_name:
+ return BaseTool()
+
+ ctor = TOOL_REGISTRY.get(tool_name)
+ if ctor:
+ return ctor(adb)
+ else:
+ print 'Unknown tool %s, available tools: %s' % (
+ tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
+ sys.exit(1)
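+
+
+# A short sketch (illustrative) of flow 1 from the module docstring: run a
+# native test command under a tool chosen by name. 'adb' stands for an
+# android_commands.AndroidCommands instance, as used elsewhere in pylib.
+def _RunNativeTestUnderTool(adb, tool_name, test_command):
+  tool = CreateTool(tool_name, adb)
+  tool.CopyFiles()
+  # An empty wrapper (BaseTool) leaves the command line unchanged.
+  return adb.RunShellCommand(
+      ('%s %s' % (tool.GetTestWrapper(), test_command)).strip())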