Diffstat (limited to 'testing/mozbase/moztest')
-rw-r--r-- | testing/mozbase/moztest/moztest/__init__.py | 7
-rw-r--r-- | testing/mozbase/moztest/moztest/adapters/__init__.py | 7
-rw-r--r-- | testing/mozbase/moztest/moztest/adapters/unit.py | 225
-rw-r--r-- | testing/mozbase/moztest/moztest/output/__init__.py | 0
-rw-r--r-- | testing/mozbase/moztest/moztest/output/autolog.py | 73
-rw-r--r-- | testing/mozbase/moztest/moztest/output/base.py | 53
-rw-r--r-- | testing/mozbase/moztest/moztest/output/xunit.py | 93
-rw-r--r-- | testing/mozbase/moztest/moztest/results.py | 323
-rw-r--r-- | testing/mozbase/moztest/setup.py | 26
-rw-r--r-- | testing/mozbase/moztest/tests/manifest.ini | 1
-rw-r--r-- | testing/mozbase/moztest/tests/test.py | 55
11 files changed, 863 insertions, 0 deletions
diff --git a/testing/mozbase/moztest/moztest/__init__.py b/testing/mozbase/moztest/moztest/__init__.py
new file mode 100644
index 000000000..5820ed2eb
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/__init__.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import adapters
+
+__all__ = ['adapters']
diff --git a/testing/mozbase/moztest/moztest/adapters/__init__.py b/testing/mozbase/moztest/moztest/adapters/__init__.py
new file mode 100644
index 000000000..46bb3c6eb
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/adapters/__init__.py
@@ -0,0 +1,7 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unit
+
+__all__ = ['unit']
diff --git a/testing/mozbase/moztest/moztest/adapters/unit.py b/testing/mozbase/moztest/moztest/adapters/unit.py
new file mode 100644
index 000000000..cee0e05e9
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/adapters/unit.py
@@ -0,0 +1,225 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+import sys
+import time
+import traceback
+
+try:
+    from unittest import TextTestResult
+except ImportError:
+    # bug 971243 - python 2.6 compatibility
+    from unittest import _TextTestResult as TextTestResult
+
+"""Adapter used to output structured log messages from unittest
+test suites"""
+
+
+def get_test_class_name(test):
+    """
+    This method is used to return the full class name from a
+    :class:`unittest.TestCase` instance.
+
+    It is used as a default to define the "class_name" extra value
+    passed in structured loggers. You can override the default by
+    implementing a "get_test_class_name" method on your TestCase subclass.
+    """
+    return "%s.%s" % (test.__class__.__module__,
+                      test.__class__.__name__)
+
+
+def get_test_method_name(test):
+    """
+    This method is used to return the full method name from a
+    :class:`unittest.TestCase` instance.
+
+    It is used as a default to define the "method_name" extra value
+    passed in structured loggers. You can override the default by
+    implementing a "get_test_method_name" method on your TestCase subclass.
+    """
+    return test._testMethodName
+
+
+class StructuredTestResult(TextTestResult):
+
+    def __init__(self, *args, **kwargs):
+        self.logger = kwargs.pop('logger')
+        self.test_list = kwargs.pop("test_list", [])
+        self.result_callbacks = kwargs.pop('result_callbacks', [])
+        self.passed = 0
+        self.testsRun = 0
+        TextTestResult.__init__(self, *args, **kwargs)
+
+    def call_callbacks(self, test, status):
+        debug_info = {}
+        for callback in self.result_callbacks:
+            info = callback(test, status)
+            if info is not None:
+                debug_info.update(info)
+        return debug_info
+
+    def startTestRun(self):
+        # This would be an opportunity to call the logger's suite_start action,
+        # however some users may use multiple suites, and per the structured
+        # logging protocol, this action should only be called once.
+        pass
+
+    def startTest(self, test):
+        self.testsRun += 1
+        self.logger.test_start(test.id())
+
+    def stopTest(self, test):
+        pass
+
+    def stopTestRun(self):
+        # This would be an opportunity to call the logger's suite_end action,
+        # however some users may use multiple suites, and per the structured
+        # logging protocol, this action should only be called once.
+        pass
+
+    def _extract_err_message(self, err):
+        # Format an exception message in the style of unittest's _exc_info_to_string
+        # while maintaining a division between a traceback and a message.
+        exc_ty, val, _ = err
+        exc_msg = "".join(traceback.format_exception_only(exc_ty, val))
+        if self.buffer:
+            output_msg = "\n".join([sys.stdout.getvalue(), sys.stderr.getvalue()])
+            return "".join([exc_msg, output_msg])
+        return exc_msg.rstrip()
+
+    def _extract_stacktrace(self, err, test):
+        # Format an exception stack in the style of unittest's _exc_info_to_string
+        # while maintaining a division between a traceback and a message.
+        # This is mostly borrowed from unittest.result._exc_info_to_string.
+
+        exctype, value, tb = err
+        while tb and self._is_relevant_tb_level(tb):
+            tb = tb.tb_next
+        # Header usually included by print_exception
+        lines = ["Traceback (most recent call last):\n"]
+        if exctype is test.failureException:
+            length = self._count_relevant_tb_levels(tb)
+            lines += traceback.format_tb(tb, length)
+        else:
+            lines += traceback.format_tb(tb)
+        return "".join(lines)
+
+    def _get_class_method_name(self, test):
+        if hasattr(test, 'get_test_class_name'):
+            class_name = test.get_test_class_name()
+        else:
+            class_name = get_test_class_name(test)
+
+        if hasattr(test, 'get_test_method_name'):
+            method_name = test.get_test_method_name()
+        else:
+            method_name = get_test_method_name(test)
+
+        return {
+            'class_name': class_name,
+            'method_name': method_name
+        }
+
+    def addError(self, test, err):
+        self.errors.append((test, self._exc_info_to_string(err, test)))
+        extra = self.call_callbacks(test, "ERROR")
+        extra.update(self._get_class_method_name(test))
+        self.logger.test_end(test.id(),
+                             "ERROR",
+                             message=self._extract_err_message(err),
+                             expected="PASS",
+                             stack=self._extract_stacktrace(err, test),
+                             extra=extra)
+
+    def addFailure(self, test, err):
+        extra = self.call_callbacks(test, "FAIL")
+        extra.update(self._get_class_method_name(test))
+        self.logger.test_end(test.id(),
+                             "FAIL",
+                             message=self._extract_err_message(err),
+                             expected="PASS",
+                             stack=self._extract_stacktrace(err, test),
+                             extra=extra)
+
+    def addSuccess(self, test):
+        extra = self._get_class_method_name(test)
+        self.logger.test_end(test.id(),
+                             "PASS",
+                             expected="PASS",
+                             extra=extra)
+
+    def addExpectedFailure(self, test, err):
+        extra = self.call_callbacks(test, "FAIL")
+        extra.update(self._get_class_method_name(test))
+        self.logger.test_end(test.id(),
+                             "FAIL",
+                             message=self._extract_err_message(err),
+                             expected="FAIL",
+                             stack=self._extract_stacktrace(err, test),
+                             extra=extra)
+
+    def addUnexpectedSuccess(self, test):
+        extra = self.call_callbacks(test, "PASS")
+        extra.update(self._get_class_method_name(test))
+        self.logger.test_end(test.id(),
+                             "PASS",
+                             expected="FAIL",
+                             extra=extra)
+
+    def addSkip(self, test, reason):
+        extra = self.call_callbacks(test, "SKIP")
+        extra.update(self._get_class_method_name(test))
+        self.logger.test_end(test.id(),
+                             "SKIP",
+                             message=reason,
+                             expected="PASS",
+                             extra=extra)
+
+
+class StructuredTestRunner(unittest.TextTestRunner):
+
+    resultclass = StructuredTestResult
+
+    def __init__(self, **kwargs):
+        """TestRunner subclass designed for structured logging.
+
+        :param logger: A ``StructuredLogger`` to use for logging the test run.
+        :param test_list: An optional list of tests that will be passed along
+            with the `suite_start` message.
+
+        """
+
+        self.logger = kwargs.pop("logger")
+        self.test_list = kwargs.pop("test_list", [])
+        self.result_callbacks = kwargs.pop("result_callbacks", [])
+        unittest.TextTestRunner.__init__(self, **kwargs)
+
+    def _makeResult(self):
+        return self.resultclass(self.stream,
+                                self.descriptions,
+                                self.verbosity,
+                                logger=self.logger,
+                                test_list=self.test_list, result_callbacks=self.result_callbacks)
+
+    def run(self, test):
+        """Run the given test case or test suite."""
+        result = self._makeResult()
+        result.failfast = self.failfast
+        result.buffer = self.buffer
+        startTime = time.time()
+        startTestRun = getattr(result, 'startTestRun', None)
+        if startTestRun is not None:
+            startTestRun()
+        try:
+            test(result)
+        finally:
+            stopTestRun = getattr(result, 'stopTestRun', None)
+            if stopTestRun is not None:
+                stopTestRun()
+        stopTime = time.time()
+        if hasattr(result, 'time_taken'):
+            result.time_taken = stopTime - startTime
+
+        return result
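For orientation, here is how a harness might drive the runner above. This is a minimal sketch, not code from this patch: it assumes a mozlog-style structured logger whose import paths and suite_start/suite_end API match this era of mozbase, and the test class and logger name are made up for illustration.

    import sys
    import unittest

    from mozlog.structured.structuredlog import StructuredLogger
    from mozlog.structured.handlers import StreamHandler
    from mozlog.structured.formatters import JSONFormatter
    from moztest.adapters.unit import StructuredTestRunner

    class ExampleTest(unittest.TestCase):  # hypothetical test case
        def test_pass(self):
            self.assertTrue(True)

    logger = StructuredLogger('example-suite')  # hypothetical logger name
    logger.add_handler(StreamHandler(sys.stdout, JSONFormatter()))

    suite = unittest.TestLoader().loadTestsFromTestCase(ExampleTest)
    # suite_start/suite_end stay with the caller, per the comments in
    # startTestRun/stopTestRun above
    logger.suite_start(tests=[])
    StructuredTestRunner(logger=logger).run(suite)
    logger.suite_end()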
diff --git a/testing/mozbase/moztest/moztest/output/__init__.py b/testing/mozbase/moztest/moztest/output/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/output/__init__.py
diff --git a/testing/mozbase/moztest/moztest/output/autolog.py b/testing/mozbase/moztest/moztest/output/autolog.py
new file mode 100644
index 000000000..b6c8368bd
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/output/autolog.py
@@ -0,0 +1,73 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from mozautolog import RESTfulAutologTestGroup
+
+from base import Output, count, long_name
+
+
+class AutologOutput(Output):
+
+    def __init__(self, es_server='buildbot-es.metrics.scl3.mozilla.com:9200',
+                 rest_server='http://brasstacks.mozilla.com/autologserver',
+                 name='moztest',
+                 harness='moztest'):
+        self.es_server = es_server
+        self.rest_server = rest_server
+
+    def serialize(self, results_collection, file_obj):
+        grps = self.make_testgroups(results_collection)
+        for g in grps:
+            file_obj.write(g.serialize())
+
+    def make_testgroups(self, results_collection):
+        testgroups = []
+        for context in results_collection.contexts:
+            coll = results_collection.subset(lambda t: t.context == context)
+            passed = coll.tests_with_result('PASS')
+            failed = coll.tests_with_result('UNEXPECTED-FAIL')
+            unexpected_passes = coll.tests_with_result('UNEXPECTED-PASS')
+            errors = coll.tests_with_result('ERROR')
+            skipped = coll.tests_with_result('SKIPPED')
+            known_fails = coll.tests_with_result('KNOWN-FAIL')
+
+            testgroup = RESTfulAutologTestGroup(
+                testgroup=context.testgroup,
+                os=context.os,
+                platform=context.arch,
+                harness=context.harness,
+                server=self.es_server,
+                restserver=self.rest_server,
+                machine=context.hostname,
+                logfile=context.logfile,
+            )
+            testgroup.add_test_suite(
+                testsuite=results_collection.suite_name,
+                elapsedtime=coll.time_taken,
+                passed=count(passed),
+                failed=count(failed) + count(errors) + count(unexpected_passes),
+                todo=count(skipped) + count(known_fails),
+            )
+            testgroup.set_primary_product(
+                tree=context.tree,
+                revision=context.revision,
+                productname=context.product,
+                buildtype=context.buildtype,
+            )
+            # need to call this again since we already used the generator
+            for f in coll.tests_with_result('UNEXPECTED-FAIL'):
+                testgroup.add_test_failure(
+                    test=long_name(f),
+                    text='\n'.join(f.output),
+                    status=f.result,
+                )
+            testgroups.append(testgroup)
+        return testgroups
+
+    def post(self, data):
+        msg = "Must pass in a list returned by make_testgroups."
+        for d in data:
+            assert isinstance(d, RESTfulAutologTestGroup), msg
+            d.submit()
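AutologOutput only makes sense against Mozilla's internal autolog/Elasticsearch infrastructure, but for completeness, a rough usage sketch. It assumes mozautolog is installed and that the collection has been populated via the TestResultCollection API defined in results.py below:

    import sys

    from moztest.output.autolog import AutologOutput
    from moztest.results import TestResultCollection

    collection = TestResultCollection('example-suite')
    # ... populate the collection, e.g. with add_unittest_result() ...

    out = AutologOutput()
    out.serialize(collection, sys.stdout)        # write serialized test groups
    # out.post(out.make_testgroups(collection))  # or submit them to the server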
diff --git a/testing/mozbase/moztest/moztest/output/base.py b/testing/mozbase/moztest/moztest/output/base.py
new file mode 100644
index 000000000..7e39317fb
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/output/base.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from __future__ import with_statement
+from contextlib import closing
+from StringIO import StringIO
+
+try:
+    from abc import abstractmethod
+except ImportError:
+    # abc is python 2.6+
+    # from https://github.com/mozilla/mozbase/blob/master/mozdevice/mozdevice/devicemanager.py
+    def abstractmethod(method):
+        line = method.func_code.co_firstlineno
+        filename = method.func_code.co_filename
+
+        def not_implemented(*args, **kwargs):
+            raise NotImplementedError('Abstract method %s at File "%s", '
+                                      'line %s should be implemented by a concrete class' %
+                                      (repr(method), filename, line))
+        return not_implemented
+
+
+class Output(object):
+    """ Abstract base class for outputting test results """
+
+    @abstractmethod
+    def serialize(self, results_collection, file_obj):
+        """ Writes the string representation of the results collection
+        to the given file object """
+
+    def dump_string(self, results_collection):
+        """ Returns the string representation of the results collection """
+        with closing(StringIO()) as s:
+            self.serialize(results_collection, s)
+            return s.getvalue()
+
+
+# helper functions
+def count(iterable):
+    """ Return the count of an iterable. Useful for generators. """
+    c = 0
+    for i in iterable:
+        c += 1
+    return c
+
+
+def long_name(test):
+    if test.test_class:
+        return '%s.%s' % (test.test_class, test.name)
+    return test.name
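Output is the extension point here: serialize() is the only abstract method, and dump_string() comes for free once it is implemented. A minimal concrete subclass, sketched against the count() helper above and the TestResultCollection API from results.py below (the class name is illustrative):

    from moztest.output.base import Output, count

    class SummaryOutput(Output):
        """ Writes a one-line summary for a results collection """

        def serialize(self, results_collection, file_obj):
            passed = count(results_collection.tests_with_result('PASS'))
            failed = count(results_collection.tests_with_result('UNEXPECTED-FAIL'))
            file_obj.write('%s: %d passed, %d unexpectedly failed\n'
                           % (results_collection.suite_name, passed, failed))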
diff --git a/testing/mozbase/moztest/moztest/output/xunit.py b/testing/mozbase/moztest/moztest/output/xunit.py
new file mode 100644
index 000000000..0d0ef7bb4
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/output/xunit.py
@@ -0,0 +1,93 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import xml.dom.minidom as dom
+
+from base import Output, count
+from moztest.results import TestResult
+
+
+class XUnitOutput(Output):
+    """ Class for writing xUnit formatted test results in an XML file """
+
+    def serialize(self, results_collection, file_obj):
+        """ Writes the xUnit formatted results to the given file object """
+
+        def _extract_xml(test_result, text='', result='Pass'):
+            if not isinstance(text, basestring):
+                text = '\n'.join(text)
+
+            cls_name = test_result.test_class
+
+            # if the test class is not already created, create it
+            if cls_name not in classes:
+                cls = doc.createElement('class')
+                cls.setAttribute('name', cls_name)
+                assembly.appendChild(cls)
+                classes[cls_name] = cls
+
+            t = doc.createElement('test')
+            t.setAttribute('name', test_result.name)
+            t.setAttribute('result', result)
+
+            if result == 'Fail':
+                f = doc.createElement('failure')
+                st = doc.createElement('stack-trace')
+                st.appendChild(doc.createTextNode(text))
+
+                f.appendChild(st)
+                t.appendChild(f)
+
+            elif result == 'Skip':
+                r = doc.createElement('reason')
+                msg = doc.createElement('message')
+                msg.appendChild(doc.createTextNode(text))
+
+                r.appendChild(msg)
+                t.appendChild(r)
+
+            cls = classes[cls_name]
+            cls.appendChild(t)
+
+        doc = dom.Document()
+
+        failed = sum([count(results_collection.tests_with_result(t))
+                      for t in TestResult.FAIL_RESULTS])
+        passed = count(results_collection.tests_with_result('PASS'))
+        skipped = count(results_collection.tests_with_result('SKIPPED'))
+
+        assembly = doc.createElement('assembly')
+        assembly.setAttribute('name', results_collection.suite_name)
+        assembly.setAttribute('time', str(results_collection.time_taken))
+        assembly.setAttribute('total', str(len(results_collection)))
+        assembly.setAttribute('passed', str(passed))
+        assembly.setAttribute('failed', str(failed))
+        assembly.setAttribute('skipped', str(skipped))
+
+        classes = {}  # str -> xml class element
+
+        for tr in results_collection.tests_with_result('ERROR'):
+            _extract_xml(tr, text=tr.output, result='Fail')
+
+        for tr in results_collection.tests_with_result('UNEXPECTED-FAIL'):
+            _extract_xml(tr, text=tr.output, result='Fail')
+
+        for tr in results_collection.tests_with_result('UNEXPECTED-PASS'):
+            _extract_xml(tr, text='UNEXPECTED-PASS', result='Fail')
+
+        for tr in results_collection.tests_with_result('SKIPPED'):
+            _extract_xml(tr, text=tr.output, result='Skip')
+
+        for tr in results_collection.tests_with_result('KNOWN-FAIL'):
+            _extract_xml(tr, text=tr.output, result='Pass')
+
+        for tr in results_collection.tests_with_result('PASS'):
+            _extract_xml(tr, result='Pass')
+
+        for cls in classes.itervalues():
+            assembly.appendChild(cls)
+
+        doc.appendChild(assembly)
+        file_obj.write(doc.toxml(encoding='utf-8'))
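Producing a report with the class above is then just a matter of choosing a file object. A short sketch, with the output file name made up and the collection assumed to be populated:

    from moztest.output.xunit import XUnitOutput
    from moztest.results import TestResultCollection

    collection = TestResultCollection('example-suite')
    # ... populate the collection ...

    with open('results.xml', 'w') as f:
        XUnitOutput().serialize(collection, f)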
diff --git a/testing/mozbase/moztest/moztest/results.py b/testing/mozbase/moztest/moztest/results.py
new file mode 100644
index 000000000..435665c67
--- /dev/null
+++ b/testing/mozbase/moztest/moztest/results.py
@@ -0,0 +1,323 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import time
+import os
+import mozinfo
+
+
+class TestContext(object):
+    """ Stores context data about the test """
+
+    attrs = ['hostname', 'arch', 'env', 'os', 'os_version', 'tree', 'revision',
+             'product', 'logfile', 'testgroup', 'harness', 'buildtype']
+
+    def __init__(self, hostname='localhost', tree='', revision='', product='',
+                 logfile=None, arch='', operating_system='', testgroup='',
+                 harness='moztest', buildtype=''):
+        self.hostname = hostname
+        self.arch = arch or mozinfo.processor
+        self.env = os.environ.copy()
+        self.os = operating_system or mozinfo.os
+        self.os_version = mozinfo.version
+        self.tree = tree
+        self.revision = revision
+        self.product = product
+        self.logfile = logfile
+        self.testgroup = testgroup
+        self.harness = harness
+        self.buildtype = buildtype
+
+    def __str__(self):
+        return '%s (%s, %s)' % (self.hostname, self.os, self.arch)
+
+    def __repr__(self):
+        return '<%s>' % self.__str__()
+
+    def __eq__(self, other):
+        if not isinstance(other, TestContext):
+            return False
+        diffs = [a for a in self.attrs if getattr(self, a) != getattr(other, a)]
+        return len(diffs) == 0
+
+    def __hash__(self):
+        def get(attr):
+            value = getattr(self, attr)
+            if isinstance(value, dict):
+                value = frozenset(value.items())
+            return value
+        return hash(frozenset([get(a) for a in self.attrs]))
+
+
+class TestResult(object):
+    """ Stores test result data """
+
+    FAIL_RESULTS = [
+        'UNEXPECTED-PASS',
+        'UNEXPECTED-FAIL',
+        'ERROR',
+    ]
+    COMPUTED_RESULTS = FAIL_RESULTS + [
+        'PASS',
+        'KNOWN-FAIL',
+        'SKIPPED',
+    ]
+    POSSIBLE_RESULTS = [
+        'PASS',
+        'FAIL',
+        'SKIP',
+        'ERROR',
+    ]
+
+    def __init__(self, name, test_class='', time_start=None, context=None,
+                 result_expected='PASS'):
+        """ Create a TestResult instance.
+        name = name of the test that is running
+        test_class = the class that the test belongs to
+        time_start = timestamp (seconds since UNIX epoch) of when the test started
+                     running; if not provided, defaults to the current time
+                     ! Provide 0 if you only have the duration
+        context = TestContext instance; can be None
+        result_expected = string representing the expected outcome of the test"""
+
+        msg = "Result '%s' not in possible results: %s" %\
+              (result_expected, ', '.join(self.POSSIBLE_RESULTS))
+        assert isinstance(name, basestring), "name has to be a string"
+        assert result_expected in self.POSSIBLE_RESULTS, msg
+
+        self.name = name
+        self.test_class = test_class
+        self.context = context
+        self.time_start = time_start if time_start is not None else time.time()
+        self.time_end = None
+        self._result_expected = result_expected
+        self._result_actual = None
+        self.result = None
+        self.filename = None
+        self.description = None
+        self.output = []
+        self.reason = None
+
+    @property
+    def test_name(self):
+        return '%s.py %s.%s' % (self.test_class.split('.')[0],
+                                self.test_class,
+                                self.name)
+
+    def __str__(self):
+        return '%s | %s (%s) | %s' % (self.result or 'PENDING',
+                                      self.name, self.test_class, self.reason)
+
+    def __repr__(self):
+        return '<%s>' % self.__str__()
+
+    def calculate_result(self, expected, actual):
+        if actual == 'ERROR':
+            return 'ERROR'
+        if actual == 'SKIP':
+            return 'SKIPPED'
+
+        if expected == 'PASS':
+            if actual == 'PASS':
+                return 'PASS'
+            if actual == 'FAIL':
+                return 'UNEXPECTED-FAIL'
+
+        if expected == 'FAIL':
+            if actual == 'PASS':
+                return 'UNEXPECTED-PASS'
+            if actual == 'FAIL':
+                return 'KNOWN-FAIL'
+
+        # if actual is skip or error, we return at the beginning, so if we get
+        # here it is definitely some kind of error
+        return 'ERROR'
+
+    def infer_results(self, computed_result):
+        assert computed_result in self.COMPUTED_RESULTS
+        if computed_result == 'UNEXPECTED-PASS':
+            expected = 'FAIL'
+            actual = 'PASS'
+        elif computed_result == 'UNEXPECTED-FAIL':
+            expected = 'PASS'
+            actual = 'FAIL'
+        elif computed_result == 'KNOWN-FAIL':
+            expected = actual = 'FAIL'
+        elif computed_result == 'SKIPPED':
+            expected = actual = 'SKIP'
+        else:
+            return
+        self._result_expected = expected
+        self._result_actual = actual
+
+    def finish(self, result, time_end=None, output=None, reason=None):
+        """ Marks the test as finished, storing its end time and status
+        ! Provide the duration as time_end if you only have that. """
+
+        if result in self.POSSIBLE_RESULTS:
+            self._result_actual = result
+            self.result = self.calculate_result(self._result_expected,
+                                                self._result_actual)
+        elif result in self.COMPUTED_RESULTS:
+            self.infer_results(result)
+            self.result = result
+        else:
+            valid = self.POSSIBLE_RESULTS + self.COMPUTED_RESULTS
+            msg = "Result '%s' not valid. Need one of: %s" %\
+                  (result, ', '.join(valid))
+            raise ValueError(msg)
+
+        # use lists instead of multiline strings
+        if isinstance(output, basestring):
+            output = output.splitlines()
+
+        self.time_end = time_end if time_end is not None else time.time()
+        self.output = output or self.output
+        self.reason = reason
+
+    @property
+    def finished(self):
+        """ Boolean saying if the test is finished or not """
+        return self.result is not None
+
+    @property
+    def duration(self):
+        """ Returns the time it took for the test to finish. If the test is
+        not finished, returns the elapsed time so far """
+        if self.result is not None:
+            return self.time_end - self.time_start
+        else:
+            # returns the elapsed time
+            return time.time() - self.time_start
+
+
+class TestResultCollection(list):
+    """ Container class that stores test results """
+
+    resultClass = TestResult
+
+    def __init__(self, suite_name, time_taken=0, resultClass=None):
+        list.__init__(self)
+        self.suite_name = suite_name
+        self.time_taken = time_taken
+        if resultClass is not None:
+            self.resultClass = resultClass
+
+    def __str__(self):
+        return "%s (%.2fs)\n%s" % (self.suite_name, self.time_taken,
+                                   list.__str__(self))
+
+    def subset(self, predicate):
+        tests = self.filter(predicate)
+        duration = 0
+        sub = TestResultCollection(self.suite_name)
+        for t in tests:
+            sub.append(t)
+            duration += t.duration
+        sub.time_taken = duration
+        return sub
+
+    @property
+    def contexts(self):
+        """ List of unique contexts for the test results contained """
+        cs = [tr.context for tr in self]
+        return list(set(cs))
+
+    def filter(self, predicate):
+        """ Returns a generator of TestResults that satisfy a given predicate """
+        return (tr for tr in self if predicate(tr))
+
+    def tests_with_result(self, result):
+        """ Returns a generator of TestResults with the given result """
+        msg = "Result '%s' not in possible results: %s" %\
+              (result, ', '.join(self.resultClass.COMPUTED_RESULTS))
+        assert result in self.resultClass.COMPUTED_RESULTS, msg
+        return self.filter(lambda t: t.result == result)
+
+    @property
+    def tests(self):
+        """ Generator of all tests in the collection """
+        return (t for t in self)
+
+    def add_result(self, test, result_expected='PASS',
+                   result_actual='PASS', output='', context=None):
+        def get_class(test):
+            return test.__class__.__module__ + '.' + test.__class__.__name__
+
+        t = self.resultClass(name=str(test).split()[0], test_class=get_class(test),
+                             time_start=0, result_expected=result_expected,
+                             context=context)
+        t.finish(result_actual, time_end=0, reason=relevant_line(output),
+                 output=output)
+        self.append(t)
+
+    @property
+    def num_failures(self):
+        fails = 0
+        for t in self:
+            if t.result in self.resultClass.FAIL_RESULTS:
+                fails += 1
+        return fails
+
+    def add_unittest_result(self, result, context=None):
+        """ Adds the python unittest result provided to the collection """
+        if hasattr(result, 'time_taken'):
+            self.time_taken += result.time_taken
+
+        for test, output in result.errors:
+            self.add_result(test, result_actual='ERROR', output=output, context=context)
+
+        for test, output in result.failures:
+            self.add_result(test, result_actual='FAIL',
+                            output=output, context=context)
+
+        if hasattr(result, 'unexpectedSuccesses'):
+            for test in result.unexpectedSuccesses:
+                self.add_result(test, result_expected='FAIL',
+                                result_actual='PASS', context=context)
+
+        if hasattr(result, 'skipped'):
+            for test, output in result.skipped:
+                self.add_result(test, result_expected='SKIP',
+                                result_actual='SKIP', output=output, context=context)
+
+        if hasattr(result, 'expectedFailures'):
+            for test, output in result.expectedFailures:
+                self.add_result(test, result_expected='FAIL',
+                                result_actual='FAIL', output=output, context=context)
+
+        # unittest does not store these by default
+        if hasattr(result, 'tests_passed'):
+            for test in result.tests_passed:
+                self.add_result(test, context=context)
+
+    @classmethod
+    def from_unittest_results(cls, context, *results):
+        """ Creates a TestResultCollection containing the given python
+        unittest results """
+
+        if not results:
+            return cls('from unittest')
+
+        # all the TestResult instances share the same context
+        context = context or TestContext()
+
+        collection = cls('from %s' % results[0].__class__.__name__)
+
+        for result in results:
+            collection.add_unittest_result(result, context)
+
+        return collection
+
+
+# used to get exceptions/errors from tracebacks
+def relevant_line(s):
+    KEYWORDS = ('Error:', 'Exception:', 'error:', 'exception:')
+    lines = s.splitlines()
+    for line in lines:
+        for keyword in KEYWORDS:
+            if keyword in line:
+                return line
+    return 'N/A'
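To make the intended lifecycle of these classes concrete, a small hand-driven example (no unittest involved); all names here are illustrative:

    from moztest.results import TestContext, TestResult, TestResultCollection

    context = TestContext(hostname='localhost', testgroup='demo')
    result = TestResult('test_login', test_class='demo.TestAuth',
                        context=context)
    # 'PASS' actual vs. the default 'PASS' expected computes to 'PASS'
    result.finish('PASS')

    collection = TestResultCollection('demo-suite')
    collection.append(result)
    print(collection.num_failures)  # 0

The same collection can then be handed to any Output subclass above, or built straight from unittest results via TestResultCollection.from_unittest_results().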
diff --git a/testing/mozbase/moztest/setup.py b/testing/mozbase/moztest/setup.py
new file mode 100644
index 000000000..9c7c11789
--- /dev/null
+++ b/testing/mozbase/moztest/setup.py
@@ -0,0 +1,26 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from setuptools import setup, find_packages
+
+PACKAGE_VERSION = '0.8'
+
+# dependencies
+deps = ['mozinfo']
+
+setup(name='moztest',
+      version=PACKAGE_VERSION,
+      description="Package for storing and outputting Mozilla test results",
+      long_description="see http://mozbase.readthedocs.org/",
+      classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
+      keywords='mozilla',
+      author='Mozilla Automation and Tools team',
+      author_email='tools@lists.mozilla.org',
+      url='https://wiki.mozilla.org/Auto-tools/Projects/Mozbase',
+      license='MPL',
+      packages=find_packages(),
+      include_package_data=True,
+      zip_safe=False,
+      install_requires=deps,
+      )
diff --git a/testing/mozbase/moztest/tests/manifest.ini b/testing/mozbase/moztest/tests/manifest.ini
new file mode 100644
index 000000000..528fdea7b
--- /dev/null
+++ b/testing/mozbase/moztest/tests/manifest.ini
@@ -0,0 +1 @@
+[test.py]
diff --git a/testing/mozbase/moztest/tests/test.py b/testing/mozbase/moztest/tests/test.py
new file mode 100644
index 000000000..93096ec2a
--- /dev/null
+++ b/testing/mozbase/moztest/tests/test.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import math
+import time
+import unittest
+
+from moztest.results import TestContext, TestResult, TestResultCollection
+
+
+class Result(unittest.TestCase):
+
+    def test_results(self):
+        self.assertRaises(AssertionError,
+                          lambda: TestResult('test', result_expected='hello'))
+        t = TestResult('test')
+        self.assertRaises(ValueError, lambda: t.finish(result='good bye'))
+
+    def test_time(self):
+        now = time.time()
+        t = TestResult('test')
+        time.sleep(1)
+        t.finish('PASS')
+        duration = time.time() - now
+        self.assertTrue(math.fabs(duration - t.duration) < 1)
+
+    def test_custom_time(self):
+        t = TestResult('test', time_start=0)
+        t.finish(result='PASS', time_end=1000)
+        self.assertEqual(t.duration, 1000)
+
+
+class Collection(unittest.TestCase):
+
+    def setUp(self):
+        c1 = TestContext('host1')
+        c2 = TestContext('host2')
+        c3 = TestContext('host2')
+        c3.os = 'B2G'
+        c4 = TestContext('host1')
+
+        t1 = TestResult('t1', context=c1)
+        t2 = TestResult('t2', context=c2)
+        t3 = TestResult('t3', context=c3)
+        t4 = TestResult('t4', context=c4)
+
+        self.collection = TestResultCollection('tests')
+        self.collection.extend([t1, t2, t3, t4])
+
+    def test_unique_contexts(self):
+        self.assertEqual(len(self.collection.contexts), 3)
+
+if __name__ == '__main__':
+    unittest.main()