Diffstat (limited to 'testing/web-platform/tests/tools/pytest/doc/en/example')
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py  238
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py  10
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py  5
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py  14
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py  42
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst  79
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py  1
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py  18
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py  1
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py  3
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py  1
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py  6
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/index.rst  34
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg  4
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst  592
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py  52
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst  91
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py  0
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py  40
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml  7
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst  475
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py  16
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py  7
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py  7
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py  11
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst  192
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst  598
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst  751
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/special.rst  72
-rw-r--r--  testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py  30
30 files changed, 3397 insertions, 0 deletions
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py
new file mode 100644
index 000000000..a4ff758b1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/failure_demo.py
@@ -0,0 +1,238 @@
+from pytest import raises
+import _pytest._code
+import py
+
+def otherfunc(a,b):
+ assert a==b
+
+def somefunc(x,y):
+ otherfunc(x,y)
+
+def otherfunc_multi(a,b):
+ assert (a ==
+ b)
+
+def test_generative(param1, param2):
+ assert param1 * 2 < param2
+
+def pytest_generate_tests(metafunc):
+ if 'param1' in metafunc.fixturenames:
+ metafunc.addcall(funcargs=dict(param1=3, param2=6))
+
+class TestFailing(object):
+ def test_simple(self):
+ def f():
+ return 42
+ def g():
+ return 43
+
+ assert f() == g()
+
+ def test_simple_multiline(self):
+ otherfunc_multi(
+ 42,
+ 6*9)
+
+ def test_not(self):
+ def f():
+ return 42
+ assert not f()
+
+class TestSpecialisedExplanations(object):
+ def test_eq_text(self):
+ assert 'spam' == 'eggs'
+
+ def test_eq_similar_text(self):
+ assert 'foo 1 bar' == 'foo 2 bar'
+
+ def test_eq_multiline_text(self):
+ assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+
+ def test_eq_long_text(self):
+ a = '1'*100 + 'a' + '2'*100
+ b = '1'*100 + 'b' + '2'*100
+ assert a == b
+
+ def test_eq_long_text_multiline(self):
+ a = '1\n'*100 + 'a' + '2\n'*100
+ b = '1\n'*100 + 'b' + '2\n'*100
+ assert a == b
+
+ def test_eq_list(self):
+ assert [0, 1, 2] == [0, 1, 3]
+
+ def test_eq_list_long(self):
+ a = [0]*100 + [1] + [3]*100
+ b = [0]*100 + [2] + [3]*100
+ assert a == b
+
+ def test_eq_dict(self):
+ assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
+
+ def test_eq_set(self):
+ assert set([0, 10, 11, 12]) == set([0, 20, 21])
+
+ def test_eq_longer_list(self):
+ assert [1,2] == [1,2,3]
+
+ def test_in_list(self):
+ assert 1 in [0, 2, 3, 4, 5]
+
+ def test_not_in_text_multiline(self):
+ text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
+ assert 'foo' not in text
+
+ def test_not_in_text_single(self):
+ text = 'single foo line'
+ assert 'foo' not in text
+
+ def test_not_in_text_single_long(self):
+ text = 'head ' * 50 + 'foo ' + 'tail ' * 20
+ assert 'foo' not in text
+
+ def test_not_in_text_single_long_term(self):
+ text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
+ assert 'f'*70 not in text
+
+
+def test_attribute():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ assert i.b == 2
+
+
+def test_attribute_instance():
+ class Foo(object):
+ b = 1
+ assert Foo().b == 2
+
+
+def test_attribute_failure():
+ class Foo(object):
+ def _get_b(self):
+ raise Exception('Failed to get attrib')
+ b = property(_get_b)
+ i = Foo()
+ assert i.b == 2
+
+
+def test_attribute_multiple():
+ class Foo(object):
+ b = 1
+ class Bar(object):
+ b = 2
+ assert Foo().b == Bar().b
+
+
+def globf(x):
+ return x+1
+
+class TestRaises:
+ def test_raises(self):
+ s = 'qwe'
+ raises(TypeError, "int(s)")
+
+ def test_raises_doesnt(self):
+ raises(IOError, "int('3')")
+
+ def test_raise(self):
+ raise ValueError("demo error")
+
+ def test_tupleerror(self):
+ a,b = [1]
+
+ def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
+ l = [1,2,3]
+ print ("l is %r" % l)
+ a,b = l.pop()
+
+ def test_some_error(self):
+ if namenotexi:
+ pass
+
+ def func1(self):
+ assert 41 == 42
+
+
+# thanks to Matthew Scott for this test
+def test_dynamic_compile_shows_nicely():
+ src = 'def foo():\n assert 1 == 0\n'
+ name = 'abc-123'
+ module = py.std.imp.new_module(name)
+ code = _pytest._code.compile(src, name, 'exec')
+ py.builtin.exec_(code, module.__dict__)
+ py.std.sys.modules[name] = module
+ module.foo()
+
+
+
+class TestMoreErrors:
+ def test_complex_error(self):
+ def f():
+ return 44
+ def g():
+ return 43
+ somefunc(f(), g())
+
+ def test_z1_unpack_error(self):
+ l = []
+ a,b = l
+
+ def test_z2_type_error(self):
+ l = 3
+ a,b = l
+
+ def test_startswith(self):
+ s = "123"
+ g = "456"
+ assert s.startswith(g)
+
+ def test_startswith_nested(self):
+ def f():
+ return "123"
+ def g():
+ return "456"
+ assert f().startswith(g())
+
+ def test_global_func(self):
+ assert isinstance(globf(42), float)
+
+ def test_instance(self):
+ self.x = 6*7
+ assert self.x != 42
+
+ def test_compare(self):
+ assert globf(10) < 5
+
+ def test_try_finally(self):
+ x = 1
+ try:
+ assert x == 0
+ finally:
+ x = 0
+
+
+class TestCustomAssertMsg:
+
+ def test_single_line(self):
+ class A:
+ a = 1
+ b = 2
+ assert A.a == b, "A.a appears not to be b"
+
+ def test_multiline(self):
+ class A:
+ a = 1
+ b = 2
+ assert A.a == b, "A.a appears not to be b\n" \
+ "or does not appear to be b\none of those"
+
+ def test_custom_repr(self):
+ class JSON:
+ a = 1
+ def __repr__(self):
+ return "This is JSON\n{\n 'foo': 'bar'\n}"
+ a = JSON()
+ b = 2
+ assert a.a == b, a
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py
new file mode 100644
index 000000000..71e8c54be
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/conftest.py
@@ -0,0 +1,10 @@
+import pytest, py
+mydir = py.path.local(__file__).dirpath()
+
+def pytest_runtest_setup(item):
+ if isinstance(item, pytest.Function):
+ if not item.fspath.relto(mydir):
+ return
+ mod = item.getparent(pytest.Module).obj
+ if hasattr(mod, 'hello'):
+ print ("mod.hello %r" % (mod.hello,))
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py
new file mode 100644
index 000000000..828e6b9fd
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/global_testmodule_config/test_hello.py
@@ -0,0 +1,5 @@
+
+hello = "world"
+
+def test_func():
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py
new file mode 100644
index 000000000..2e5cd20b1
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_failures.py
@@ -0,0 +1,14 @@
+
+import py
+failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
+pytest_plugins = 'pytester',
+
+def test_failure_demo_fails_properly(testdir):
+ target = testdir.tmpdir.join(failure_demo.basename)
+ failure_demo.copy(target)
+ failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
+ result = testdir.runpytest(target, syspathinsert=True)
+ result.stdout.fnmatch_lines([
+ "*42 failed*"
+ ])
+ assert result.ret != 0
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py
new file mode 100644
index 000000000..512330cb4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/assertion/test_setup_flow_example.py
@@ -0,0 +1,42 @@
+def setup_module(module):
+ module.TestStateFullThing.classcount = 0
+
+class TestStateFullThing:
+ def setup_class(cls):
+ cls.classcount += 1
+
+ def teardown_class(cls):
+ cls.classcount -= 1
+
+ def setup_method(self, method):
+ self.id = eval(method.__name__[5:])
+
+ def test_42(self):
+ assert self.classcount == 1
+ assert self.id == 42
+
+ def test_23(self):
+ assert self.classcount == 1
+ assert self.id == 23
+
+def teardown_module(module):
+ assert module.TestStateFullThing.classcount == 0
+
+""" For this example the control flow happens as follows::
+ import test_setup_flow_example
+ setup_module(test_setup_flow_example)
+ setup_class(TestStateFullThing)
+ instance = TestStateFullThing()
+ setup_method(instance, instance.test_42)
+ instance.test_42()
+ setup_method(instance, instance.test_23)
+ instance.test_23()
+ teardown_class(TestStateFullThing)
+ teardown_module(test_setup_flow_example)
+
+Note that ``setup_class(TestStateFullThing)`` is called and not
+``TestStateFullThing.setup_class()`` which would require you
+to insert ``setup_class = classmethod(setup_class)`` to make
+your setup function callable.
+"""
+
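+# Editor's sketch (not part of the original example): the explicit
+# classmethod-based spelling that the docstring above says you would
+# otherwise need. The class name is hypothetical and deliberately does
+# not start with "Test" so pytest will not collect it.
+class _StateFullThingExplicit:
+    classcount = 0
+
+    def setup_class(cls):
+        cls.classcount += 1
+    setup_class = classmethod(setup_class)
+
+    def teardown_class(cls):
+        cls.classcount -= 1
+    teardown_class = classmethod(teardown_class)
+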
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst
new file mode 100644
index 000000000..1bc32b283
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/attic.rst
@@ -0,0 +1,79 @@
+
+.. _`accept example`:
+
+example: specifying and selecting acceptance tests
+--------------------------------------------------------------
+
+.. sourcecode:: python
+
+ # ./conftest.py
+ def pytest_option(parser):
+ group = parser.getgroup("myproject")
+ group.addoption("-A", dest="acceptance", action="store_true",
+ help="run (slow) acceptance tests")
+
+ def pytest_funcarg__accept(request):
+ return AcceptFixture(request)
+
+ class AcceptFixture:
+ def __init__(self, request):
+ if not request.config.option.acceptance:
+ pytest.skip("specify -A to run acceptance tests")
+ self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True)
+
+ def run(self, cmd):
+ """ called by test code to execute an acceptance test. """
+ self.tmpdir.chdir()
+ return py.process.cmdexec(cmd)
+
+
+and the actual test function example:
+
+.. sourcecode:: python
+
+ def test_some_acceptance_aspect(accept):
+ accept.tmpdir.mkdir("somesub")
+ result = accept.run("ls -la")
+ assert "somesub" in result
+
+If you run this test without specifying the command line option,
+the test will be skipped with an appropriate message. Otherwise
+you can start to add convenience and test support methods
+to your ``AcceptFixture`` and drive the running of tools or
+applications, and provide ways to make assertions about
+the output.
+
+.. _`decorate a funcarg`:
+
+example: decorating a funcarg in a test module
+--------------------------------------------------------------
+
+For larger scale setups it's sometimes useful to decorate
+a funcarg just for a particular test module. We can
+extend the `accept example`_ by putting this in our test module:
+
+.. sourcecode:: python
+
+ def pytest_funcarg__accept(request):
+ # call the next factory (living in our conftest.py)
+ arg = request.getfuncargvalue("accept")
+ # create a special layout in our tempdir
+ arg.tmpdir.mkdir("special")
+ return arg
+
+ class TestSpecialAcceptance:
+ def test_sometest(self, accept):
+ assert accept.tmpdir.join("special").check()
+
+Our module level factory will be invoked first and it can
+ask its request object to call the next factory and then
+decorate its result. This mechanism allows us to stay
+ignorant of how/where the function argument is provided -
+in our example from a `conftest plugin`_.
+
+sidenote: the temporary directories used here are instances of
+the `py.path.local`_ class, which provides many of the ``os.path``
+methods in a convenient way.
+
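+As a brief illustration (an editor's sketch, not part of the original
+document), here is roughly how such a ``py.path.local`` temporary
+directory can be used:
+
+.. sourcecode:: python
+
+    import py
+
+    tmpdir = py.path.local.mkdtemp()      # fresh temporary directory
+    sub = tmpdir.mkdir("somesub")         # os.mkdir-like helper
+    target = sub.join("hello.txt")        # os.path.join-like helper
+    target.write("hi")                    # write file contents
+    assert target.check(file=1)           # os.path.isfile-like check
+    assert "hello.txt" in [p.basename for p in sub.listdir()]
+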
+.. _`py.path.local`: ../path.html#local
+.. _`conftest plugin`: customize.html#conftestplugin
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py
new file mode 100644
index 000000000..f905738c4
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/conftest.py
@@ -0,0 +1 @@
+collect_ignore = ["nonpython"]
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py
new file mode 100644
index 000000000..d689c11b2
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/conftest.py
@@ -0,0 +1,18 @@
+
+import pytest
+
+@pytest.fixture("session")
+def setup(request):
+ setup = CostlySetup()
+ request.addfinalizer(setup.finalize)
+ return setup
+
+class CostlySetup:
+ def __init__(self):
+ import time
+ print ("performing costly setup")
+ time.sleep(5)
+ self.timecostly = 1
+
+ def finalize(self):
+ del self.timecostly
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
new file mode 100644
index 000000000..d97657867
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub1/test_quick.py
@@ -0,0 +1,3 @@
+
+def test_quick(setup):
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py
new file mode 100644
index 000000000..792d60054
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py
new file mode 100644
index 000000000..6ed6ee4d8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/costlysetup/sub2/test_two.py
@@ -0,0 +1,6 @@
+def test_something(setup):
+ assert setup.timecostly == 1
+
+def test_something_more(setup):
+ assert setup.timecostly == 1
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/index.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/index.rst
new file mode 100644
index 000000000..363de5ab7
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/index.rst
@@ -0,0 +1,34 @@
+
+.. _examples:
+
+Usages and Examples
+===========================================
+
+Here is a (growing) list of examples. :ref:`Contact <contact>` us if you
+need more examples or have questions. Also take a look at the
+:ref:`comprehensive documentation <toc>`, which contains many example
+snippets as well. In addition, `pytest on stackoverflow.com
+<http://stackoverflow.com/search?q=pytest>`_ often comes with example
+answers.
+
+For basic examples, see
+
+- :doc:`../getting-started` for basic introductory examples
+- :ref:`assert` for basic assertion examples
+- :ref:`fixtures` for basic fixture/setup examples
+- :ref:`parametrize` for basic test function parametrization
+- :doc:`../unittest` for basic unittest integration
+- :doc:`../nose` for basic nosetests integration
+
+The following examples aim at various use cases you might encounter.
+
+.. toctree::
+ :maxdepth: 2
+
+ reportingdemo
+ simple
+ parametrize
+ markers
+ special
+ pythoncollection
+ nonpython
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg b/testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg
new file mode 100644
index 000000000..02d3750ee
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/layout1/setup.cfg
@@ -0,0 +1,4 @@
+[pytest]
+testfilepatterns =
+ ${topdir}/tests/unit/test_${basename}
+ ${topdir}/tests/functional/*.py
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst
new file mode 100644
index 000000000..6bdc60347
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/markers.rst
@@ -0,0 +1,592 @@
+
+.. _`mark examples`:
+
+Working with custom markers
+=================================================
+
+Here are some examples using the :ref:`mark` mechanism.
+
+Marking test functions and selecting them for a run
+----------------------------------------------------
+
+You can "mark" a test function with custom metadata like this::
+
+ # content of test_server.py
+
+ import pytest
+ @pytest.mark.webtest
+ def test_send_http():
+ pass # perform some webtest test for your app
+ def test_something_quick():
+ pass
+ def test_another():
+ pass
+ class TestClass:
+ def test_method(self):
+ pass
+
+.. versionadded:: 2.2
+
+You can then restrict a test run to only run tests marked with ``webtest``::
+
+ $ py.test -v -m webtest
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_send_http PASSED
+
+ ======= 3 tests deselected by "-m 'webtest'" ========
+ ======= 1 passed, 3 deselected in 0.12 seconds ========
+
+Or the inverse, running all tests except the webtest ones::
+
+ $ py.test -v -m "not webtest"
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_something_quick PASSED
+ test_server.py::test_another PASSED
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 tests deselected by "-m 'not webtest'" ========
+ ======= 3 passed, 1 deselected in 0.12 seconds ========
+
+Selecting tests based on their node ID
+--------------------------------------
+
+You can provide one or more :ref:`node IDs <node-id>` as positional
+arguments to select only specified tests. This makes it easy to select
+tests based on their module, class, method, or function name::
+
+ $ py.test -v test_server.py::TestClass::test_method
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 5 items
+
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 passed in 0.12 seconds ========
+
+You can also select on the class::
+
+ $ py.test -v test_server.py::TestClass
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 passed in 0.12 seconds ========
+
+Or select multiple nodes::
+
+ $ py.test -v test_server.py::TestClass test_server.py::test_send_http
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 8 items
+
+ test_server.py::TestClass::test_method PASSED
+ test_server.py::test_send_http PASSED
+
+ ======= 2 passed in 0.12 seconds ========
+
+.. _node-id:
+
+.. note::
+
+ Node IDs are of the form ``module.py::class::method`` or
+ ``module.py::function``. Node IDs control which tests are
+ collected, so ``module.py::class`` will select all test methods
+ on the class. Nodes are also created for each parameter of a
+ parametrized fixture or test, so selecting a parametrized test
+ must include the parameter value, e.g.
+ ``module.py::function[param]``.
+
+ Node IDs for failing tests are displayed in the test summary info
+ when running py.test with the ``-rf`` option. You can also
+ construct Node IDs from the output of ``py.test --collectonly``.
+
+Using ``-k expr`` to select tests based on their name
+-------------------------------------------------------
+
+.. versionadded: 2.0/2.3.4
+
+You can use the ``-k`` command line option to specify an expression
+which implements a substring match on the test names instead of the
+exact match on markers that ``-m`` provides. This makes it easy to
+select tests based on their names::
+
+ $ py.test -v -k http # running with the above defined example module
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_send_http PASSED
+
+ ======= 3 tests deselected by '-khttp' ========
+ ======= 1 passed, 3 deselected in 0.12 seconds ========
+
+And you can also run all tests except the ones that match the keyword::
+
+ $ py.test -k "not send_http" -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_something_quick PASSED
+ test_server.py::test_another PASSED
+ test_server.py::TestClass::test_method PASSED
+
+ ======= 1 tests deselected by '-knot send_http' ========
+ ======= 3 passed, 1 deselected in 0.12 seconds ========
+
+Or to select "http" and "quick" tests::
+
+ $ py.test -k "http or quick" -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 4 items
+
+ test_server.py::test_send_http PASSED
+ test_server.py::test_something_quick PASSED
+
+ ======= 2 tests deselected by '-khttp or quick' ========
+ ======= 2 passed, 2 deselected in 0.12 seconds ========
+
+.. note::
+
+ If you are using expressions such as "X and Y" then both X and Y
+ need to be simple non-keyword names. For example, "pass" or "from"
+ will result in SyntaxErrors because "-k" evaluates the expression.
+
+ However, if the "-k" argument is a simple string, no such restrictions
+ apply. Also "-k 'not STRING'" has no restrictions. You can also
+ specify numbers like "-k 1.3" to match tests which are parametrized
+ with the float "1.3".
+
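+To illustrate the last point about numeric matching, here is a minimal
+sketch (an editor's addition, not part of the original pytest docs; the
+module name ``test_floats.py`` is made up)::
+
+    # content of test_floats.py
+    import pytest
+
+    @pytest.mark.parametrize("value", [1.3, 2.5])
+    def test_positive(value):
+        assert value > 0
+
+Running ``py.test -v -k 1.3 test_floats.py`` should then select only the
+``test_positive[1.3]`` item, because the parameter value becomes part of
+the generated test ID.
+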
+Registering markers
+-------------------------------------
+
+.. versionadded:: 2.2
+
+.. ini-syntax for custom markers:
+
+Registering markers for your test suite is simple::
+
+ # content of pytest.ini
+ [pytest]
+ markers =
+ webtest: mark a test as a webtest.
+
+You can ask which markers exist for your test suite - the list includes our just-defined ``webtest`` marker::
+
+ $ py.test --markers
+ @pytest.mark.webtest: mark a test as a webtest.
+
+ @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
+
+ @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
+
+ @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
+
+ @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
+
+ @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
+
+ @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
+
+
+For an example on how to add and work with markers from a plugin, see
+:ref:`adding a custom marker from a plugin`.
+
+.. note::
+
+ It is recommended to explicitly register markers so that:
+
+ * there is one place in your test suite defining your markers
+
+ * asking for existing markers via ``py.test --markers`` gives good output
+
+ * typos in function markers are treated as an error if you use
+ the ``--strict`` option (see the sketch below). Future versions of
+ ``pytest`` will probably start treating non-registered markers as
+ errors at some point.
+
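+As a quick illustration of the ``--strict`` point above (an editor's
+sketch, not part of the original pytest docs; the file name is made
+up)::
+
+    # content of test_strict_demo.py
+    import pytest
+
+    @pytest.mark.webtets   # typo: the registered marker is "webtest"
+    def test_send_http():
+        pass
+
+With the ``markers`` entry from the ``pytest.ini`` above, running
+``py.test --strict test_strict_demo.py`` should report the unregistered
+``webtets`` marker as an error instead of silently creating a new marker.
+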
+.. _`scoped-marking`:
+
+Marking whole classes or modules
+----------------------------------------------------
+
+You may use ``pytest.mark`` decorators on a class to apply markers to all of
+its test methods::
+
+ # content of test_mark_classlevel.py
+ import pytest
+ @pytest.mark.webtest
+ class TestClass:
+ def test_startup(self):
+ pass
+ def test_startup_and_more(self):
+ pass
+
+This is equivalent to directly applying the decorator to the
+two test functions.
+
+To remain backward-compatible with Python 2.4 you can also set a
+``pytestmark`` attribute on a TestClass like this::
+
+ import pytest
+
+ class TestClass:
+ pytestmark = pytest.mark.webtest
+
+or if you need to use multiple markers you can use a list::
+
+ import pytest
+
+ class TestClass:
+ pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
+
+You can also set a module level marker::
+
+ import pytest
+ pytestmark = pytest.mark.webtest
+
+in which case it will be applied to all functions and
+methods defined in the module.
+
+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark will make it apply
+to each individual test. However it is also possible to
+apply a marker to an individual test instance::
+
+ import pytest
+
+ @pytest.mark.foo
+ @pytest.mark.parametrize(("n", "expected"), [
+ (1, 2),
+ pytest.mark.bar((1, 3)),
+ (2, 3),
+ ])
+ def test_increment(n, expected):
+ assert n + 1 == expected
+
+In this example the mark "foo" will apply to each of the three
+tests, whereas the "bar" mark is only applied to the second test.
+Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.
+
+.. note::
+
+ If the data you are parametrizing happen to be single callables, you need to be careful
+ when marking these items. `pytest.mark.xfail(my_func)` won't work because it's also the
+ signature of a function being decorated. To resolve this ambiguity, you need to pass a
+ reason argument:
+ `pytest.mark.xfail(func_bar, reason="Issue#7")`.
+
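+A minimal sketch of that workaround (an editor's addition, not part of
+the original pytest docs; ``func_bar`` is just an ordinary function used
+as a parameter value)::
+
+    import pytest
+
+    def func_bar():
+        return 1
+
+    @pytest.mark.parametrize("func", [
+        pytest.mark.xfail(func_bar, reason="Issue#7"),
+    ])
+    def test_calls_func(func):
+        # expected to fail for this parameter, hence the xfail mark above
+        assert func() == 2
+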
+
+.. _`adding a custom marker from a plugin`:
+
+Custom marker and command line option to control test runs
+----------------------------------------------------------
+
+.. regendoc:wipe
+
+Plugins can provide custom markers and implement specific behaviour
+based on them. This is a self-contained example which adds a command
+line option and a parametrized test function marker to run tests
+specified via named environments::
+
+ # content of conftest.py
+
+ import pytest
+ def pytest_addoption(parser):
+ parser.addoption("-E", action="store", metavar="NAME",
+ help="only run tests matching the environment NAME.")
+
+ def pytest_configure(config):
+ # register an additional marker
+ config.addinivalue_line("markers",
+ "env(name): mark test to run only on named environment")
+
+ def pytest_runtest_setup(item):
+ envmarker = item.get_marker("env")
+ if envmarker is not None:
+ envname = envmarker.args[0]
+ if envname != item.config.getoption("-E"):
+ pytest.skip("test requires env %r" % envname)
+
+A test file using this local plugin::
+
+ # content of test_someenv.py
+
+ import pytest
+ @pytest.mark.env("stage1")
+ def test_basic_db_operation():
+ pass
+
+and an example invocation specifying a different environment than what
+the test needs::
+
+ $ py.test -E stage2
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_someenv.py s
+
+ ======= 1 skipped in 0.12 seconds ========
+
+and here is one that specifies exactly the environment needed::
+
+ $ py.test -E stage1
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+
+ test_someenv.py .
+
+ ======= 1 passed in 0.12 seconds ========
+
+The ``--markers`` option always gives you a list of available markers::
+
+ $ py.test --markers
+ @pytest.mark.env(name): mark test to run only on named environment
+
+ @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
+
+ @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
+
+ @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
+
+ @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
+
+ @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
+
+ @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
+
+
+Reading markers which were set from multiple places
+----------------------------------------------------
+
+.. versionadded: 2.2.2
+
+.. regendoc:wipe
+
+If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function. From plugin
+code you can read over all such settings. Example::
+
+ # content of test_mark_three_times.py
+ import pytest
+ pytestmark = pytest.mark.glob("module", x=1)
+
+ @pytest.mark.glob("class", x=2)
+ class TestClass:
+ @pytest.mark.glob("function", x=3)
+ def test_something(self):
+ pass
+
+Here we have the marker "glob" applied three times to the same
+test function. From a conftest file we can read it like this::
+
+ # content of conftest.py
+ import sys
+
+ def pytest_runtest_setup(item):
+ g = item.get_marker("glob")
+ if g is not None:
+ for info in g:
+ print ("glob args=%s kwargs=%s" %(info.args, info.kwargs))
+ sys.stdout.flush()
+
+Let's run this without capturing output and see what we get::
+
+ $ py.test -q -s
+ glob args=('function',) kwargs={'x': 3}
+ glob args=('class',) kwargs={'x': 2}
+ glob args=('module',) kwargs={'x': 1}
+ .
+ 1 passed in 0.12 seconds
+
+marking platform specific tests with pytest
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Consider that you have a test suite which marks tests for particular platforms,
+namely ``pytest.mark.darwin``, ``pytest.mark.win32`` etc., and you
+also have tests that run on all platforms and have no specific
+marker. If you now want a way to run only the tests
+for your particular platform, you could use the following plugin::
+
+ # content of conftest.py
+ #
+ import sys
+ import pytest
+
+ ALL = set("darwin linux2 win32".split())
+
+ def pytest_runtest_setup(item):
+ if isinstance(item, item.Function):
+ plat = sys.platform
+ if not item.get_marker(plat):
+ if ALL.intersection(item.keywords):
+ pytest.skip("cannot run on platform %s" %(plat))
+
+then tests will be skipped if they were specified for a different platform.
+Let's write a little test file to show what this looks like::
+
+ # content of test_plat.py
+
+ import pytest
+
+ @pytest.mark.darwin
+ def test_if_apple_is_evil():
+ pass
+
+ @pytest.mark.linux2
+ def test_if_linux_works():
+ pass
+
+ @pytest.mark.win32
+ def test_if_win32_crashes():
+ pass
+
+ def test_runs_everywhere():
+ pass
+
+then you will see the tests for other platforms skipped and the remaining tests executed, as expected::
+
+ $ py.test -rs # this option reports skip reasons
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_plat.py sss.
+ ======= short test summary info ========
+ SKIP [3] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux
+
+ ======= 1 passed, 3 skipped in 0.12 seconds ========
+
+Note that if you specify a platform via the ``-m`` marker command line option like this::
+
+ $ py.test -m linux2
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_plat.py s
+
+ ======= 3 tests deselected by "-m 'linux2'" ========
+ ======= 1 skipped, 3 deselected in 0.12 seconds ========
+
+then the unmarked tests will not be run. It is thus a way to restrict the run to the platform-specific tests.
+
+Automatically adding markers based on test names
+--------------------------------------------------------
+
+.. regendoc:wipe
+
+If you have a test suite where test function names indicate a certain
+type of test, you can implement a hook that automatically defines
+markers so that you can use the ``-m`` option with it. Let's look
+at this test module::
+
+ # content of test_module.py
+
+ def test_interface_simple():
+ assert 0
+
+ def test_interface_complex():
+ assert 0
+
+ def test_event_simple():
+ assert 0
+
+ def test_something_else():
+ assert 0
+
+We want to dynamically define two markers and can do it in a
+``conftest.py`` plugin::
+
+ # content of conftest.py
+
+ import pytest
+ def pytest_collection_modifyitems(items):
+ for item in items:
+ if "interface" in item.nodeid:
+ item.add_marker(pytest.mark.interface)
+ elif "event" in item.nodeid:
+ item.add_marker(pytest.mark.event)
+
+We can now use the ``-m`` option to select one set::
+
+ $ py.test -m interface --tb=short
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_module.py FF
+
+ ======= FAILURES ========
+ _______ test_interface_simple ________
+ test_module.py:3: in test_interface_simple
+ assert 0
+ E assert 0
+ _______ test_interface_complex ________
+ test_module.py:6: in test_interface_complex
+ assert 0
+ E assert 0
+ ======= 2 tests deselected by "-m 'interface'" ========
+ ======= 2 failed, 2 deselected in 0.12 seconds ========
+
+or to select both "event" and "interface" tests::
+
+ $ py.test -m "interface or event" --tb=short
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_module.py FFF
+
+ ======= FAILURES ========
+ _______ test_interface_simple ________
+ test_module.py:3: in test_interface_simple
+ assert 0
+ E assert 0
+ _______ test_interface_complex ________
+ test_module.py:6: in test_interface_complex
+ assert 0
+ E assert 0
+ _______ test_event_simple ________
+ test_module.py:9: in test_event_simple
+ assert 0
+ E assert 0
+ ======= 1 tests deselected by "-m 'interface or event'" ========
+ ======= 3 failed, 1 deselected in 0.12 seconds ========
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py b/testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py
new file mode 100644
index 000000000..66a368a12
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/multipython.py
@@ -0,0 +1,52 @@
+"""
+module containing parametrized tests exercising cross-python
+serialization via the pickle module.
+"""
+import py
+import pytest
+import _pytest._code
+
+pythonlist = ['python2.6', 'python2.7', 'python3.3']
+@pytest.fixture(params=pythonlist)
+def python1(request, tmpdir):
+ picklefile = tmpdir.join("data.pickle")
+ return Python(request.param, picklefile)
+
+@pytest.fixture(params=pythonlist)
+def python2(request, python1):
+ return Python(request.param, python1.picklefile)
+
+class Python:
+ def __init__(self, version, picklefile):
+ self.pythonpath = py.path.local.sysfind(version)
+ if not self.pythonpath:
+ pytest.skip("%r not found" %(version,))
+ self.picklefile = picklefile
+ def dumps(self, obj):
+ dumpfile = self.picklefile.dirpath("dump.py")
+ dumpfile.write(_pytest._code.Source("""
+ import pickle
+ f = open(%r, 'wb')
+ s = pickle.dump(%r, f, protocol=2)
+ f.close()
+ """ % (str(self.picklefile), obj)))
+ py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
+
+ def load_and_is_true(self, expression):
+ loadfile = self.picklefile.dirpath("load.py")
+ loadfile.write(_pytest._code.Source("""
+ import pickle
+ f = open(%r, 'rb')
+ obj = pickle.load(f)
+ f.close()
+ res = eval(%r)
+ if not res:
+ raise SystemExit(1)
+ """ % (str(self.picklefile), expression)))
+ print (loadfile)
+ py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))
+
+@pytest.mark.parametrize("obj", [42, {}, {1:3},])
+def test_basic_objects(python1, python2, obj):
+ python1.dumps(obj)
+ python2.load_and_is_true("obj == %s" % obj)
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst
new file mode 100644
index 000000000..6437e3984
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython.rst
@@ -0,0 +1,91 @@
+
+.. _`non-python tests`:
+
+Working with non-python tests
+====================================================
+
+.. _`yaml plugin`:
+
+A basic example for specifying tests in Yaml files
+--------------------------------------------------------------
+
+.. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py
+.. _`PyYAML`: http://pypi.python.org/pypi/PyYAML/
+
+Here is an example ``conftest.py`` (extracted from Ali Afshar's special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yml`` files and will execute the yaml-formatted content as custom tests:
+
+.. include:: nonpython/conftest.py
+ :literal:
+
+You can create a simple example file:
+
+.. include:: nonpython/test_simple.yml
+ :literal:
+
+and if you installed `PyYAML`_ or a compatible YAML-parser you can
+now execute the test specification::
+
+ nonpython $ py.test test_simple.yml
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
+ collected 2 items
+
+ test_simple.yml F.
+
+ ======= FAILURES ========
+ _______ usecase: hello ________
+ usecase execution failed
+ spec failed: 'some': 'other'
+ no further details known at this point.
+ ======= 1 failed, 1 passed in 0.12 seconds ========
+
+.. regendoc:wipe
+
+You get one dot for the passing ``sub1: sub1`` check and one failure.
+Obviously in the above ``conftest.py`` you'll want to implement a more
+interesting interpretation of the yaml-values. You can easily write
+your own domain specific testing language this way.
+
+.. note::
+
+ ``repr_failure(excinfo)`` is called for representing test failures.
+ If you create custom collection nodes you can return an error
+ representation string of your choice. It
+ will be reported as a (red) string.
+
+``reportinfo()`` is used for representing the test location and is also
+consulted when reporting in ``verbose`` mode::
+
+ nonpython $ py.test -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
+ collecting ... collected 2 items
+
+ test_simple.yml::hello FAILED
+ test_simple.yml::ok PASSED
+
+ ======= FAILURES ========
+ _______ usecase: hello ________
+ usecase execution failed
+ spec failed: 'some': 'other'
+ no further details known at this point.
+ ======= 1 failed, 1 passed in 0.12 seconds ========
+
+.. regendoc:wipe
+
+While developing your custom test collection and execution it's also
+interesting to just look at the collection tree::
+
+ nonpython $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
+ collected 2 items
+ <YamlFile 'test_simple.yml'>
+ <YamlItem 'hello'>
+ <YamlItem 'ok'>
+
+ ======= no tests ran in 0.12 seconds ========
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/__init__.py
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py
new file mode 100644
index 000000000..2406e8f10
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/conftest.py
@@ -0,0 +1,40 @@
+# content of conftest.py
+
+import pytest
+
+def pytest_collect_file(parent, path):
+ if path.ext == ".yml" and path.basename.startswith("test"):
+ return YamlFile(path, parent)
+
+class YamlFile(pytest.File):
+ def collect(self):
+ import yaml # we need a yaml parser, e.g. PyYAML
+ raw = yaml.safe_load(self.fspath.open())
+ for name, spec in raw.items():
+ yield YamlItem(name, self, spec)
+
+class YamlItem(pytest.Item):
+ def __init__(self, name, parent, spec):
+ super(YamlItem, self).__init__(name, parent)
+ self.spec = spec
+
+ def runtest(self):
+ for name, value in self.spec.items():
+ # some custom test execution (dumb example follows)
+ if name != value:
+ raise YamlException(self, name, value)
+
+ def repr_failure(self, excinfo):
+ """ called when self.runtest() raises an exception. """
+ if isinstance(excinfo.value, YamlException):
+ return "\n".join([
+ "usecase execution failed",
+ " spec failed: %r: %r" % excinfo.value.args[1:3],
+ " no further details known at this point."
+ ])
+
+ def reportinfo(self):
+ return self.fspath, 0, "usecase: %s" % self.name
+
+class YamlException(Exception):
+ """ custom exception for error reporting. """
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml
new file mode 100644
index 000000000..f0d8d11fc
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/nonpython/test_simple.yml
@@ -0,0 +1,7 @@
+# test_simple.yml
+ok:
+ sub1: sub1
+
+hello:
+ world: world
+ some: other
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst
new file mode 100644
index 000000000..5d637ffcb
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/parametrize.rst
@@ -0,0 +1,475 @@
+
+.. _paramexamples:
+
+Parametrizing tests
+=================================================
+
+.. currentmodule:: _pytest.python
+
+``pytest`` allows you to easily parametrize test functions.
+For basic docs, see :ref:`parametrize-basics`.
+
+In the following we provide some examples using
+the builtin mechanisms.
+
+Generating parameter combinations, depending on the command line
+----------------------------------------------------------------------------
+
+.. regendoc:wipe
+
+Let's say we want to execute a test with different computation
+parameters, where the parameter range is determined by a command
+line argument. Let's first write a simple (do-nothing) computation test::
+
+ # content of test_compute.py
+
+ def test_compute(param1):
+ assert param1 < 4
+
+Now we add a test configuration like this::
+
+ # content of conftest.py
+
+ def pytest_addoption(parser):
+ parser.addoption("--all", action="store_true",
+ help="run all combinations")
+
+ def pytest_generate_tests(metafunc):
+ if 'param1' in metafunc.fixturenames:
+ if metafunc.config.option.all:
+ end = 5
+ else:
+ end = 2
+ metafunc.parametrize("param1", range(end))
+
+This means that we only run 2 tests if we do not pass ``--all``::
+
+ $ py.test -q test_compute.py
+ ..
+ 2 passed in 0.12 seconds
+
+We run only two computations, so we see two dots.
+Let's run the full monty::
+
+ $ py.test -q --all
+ ....F
+ ======= FAILURES ========
+ _______ test_compute[4] ________
+
+ param1 = 4
+
+ def test_compute(param1):
+ > assert param1 < 4
+ E assert 4 < 4
+
+ test_compute.py:3: AssertionError
+ 1 failed, 4 passed in 0.12 seconds
+
+As expected when running the full range of ``param1`` values
+we'll get an error on the last one.
+
+
+Different options for test IDs
+------------------------------------
+
+pytest will build a string that is the test ID for each set of values in a
+parametrized test. These IDs can be used with ``-k`` to select specific cases
+to run, and they will also identify the specific case when one is failing.
+Running pytest with ``--collect-only`` will show the generated IDs.
+
+Numbers, strings, booleans and None will have their usual string representation
+used in the test ID. For other objects, pytest will make a string based on
+the argument name::
+
+ # content of test_time.py
+
+ import pytest
+
+ from datetime import datetime, timedelta
+
+ testdata = [
+ (datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1)),
+ (datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1)),
+ ]
+
+
+ @pytest.mark.parametrize("a,b,expected", testdata)
+ def test_timedistance_v0(a, b, expected):
+ diff = a - b
+ assert diff == expected
+
+
+ @pytest.mark.parametrize("a,b,expected", testdata, ids=["forward", "backward"])
+ def test_timedistance_v1(a, b, expected):
+ diff = a - b
+ assert diff == expected
+
+
+ def idfn(val):
+ if isinstance(val, (datetime,)):
+ # note this wouldn't show any hours/minutes/seconds
+ return val.strftime('%Y%m%d')
+
+
+ @pytest.mark.parametrize("a,b,expected", testdata, ids=idfn)
+ def test_timedistance_v2(a, b, expected):
+ diff = a - b
+ assert diff == expected
+
+
+In ``test_timedistance_v0``, we let pytest generate the test IDs.
+
+In ``test_timedistance_v1``, we specified ``ids`` as a list of strings which were
+used as the test IDs. These are succinct, but can be a pain to maintain.
+
+In ``test_timedistance_v2``, we specified ``ids`` as a function that can generate a
+string representation to make part of the test ID. So our ``datetime`` values use the
+label generated by ``idfn``, but because we didn't generate a label for ``timedelta``
+objects, they are still using the default pytest representation::
+
+
+ $ py.test test_time.py --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 6 items
+ <Module 'test_time.py'>
+ <Function 'test_timedistance_v0[a0-b0-expected0]'>
+ <Function 'test_timedistance_v0[a1-b1-expected1]'>
+ <Function 'test_timedistance_v1[forward]'>
+ <Function 'test_timedistance_v1[backward]'>
+ <Function 'test_timedistance_v2[20011212-20011211-expected0]'>
+ <Function 'test_timedistance_v2[20011211-20011212-expected1]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+A quick port of "testscenarios"
+------------------------------------
+
+.. _`test scenarios`: http://pypi.python.org/pypi/testscenarios/
+
+Here is a quick port to run tests configured with `test scenarios`_,
+an add-on from Robert Collins for the standard unittest framework. We
+only have to work a bit to construct the correct arguments for pytest's
+:py:func:`Metafunc.parametrize`::
+
+ # content of test_scenarios.py
+
+ def pytest_generate_tests(metafunc):
+ idlist = []
+ argvalues = []
+ for scenario in metafunc.cls.scenarios:
+ idlist.append(scenario[0])
+ items = scenario[1].items()
+ argnames = [x[0] for x in items]
+ argvalues.append(([x[1] for x in items]))
+ metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
+
+ scenario1 = ('basic', {'attribute': 'value'})
+ scenario2 = ('advanced', {'attribute': 'value2'})
+
+ class TestSampleWithScenarios:
+ scenarios = [scenario1, scenario2]
+
+ def test_demo1(self, attribute):
+ assert isinstance(attribute, str)
+
+ def test_demo2(self, attribute):
+ assert isinstance(attribute, str)
+
+This is a fully self-contained example which you can run with::
+
+ $ py.test test_scenarios.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_scenarios.py ....
+
+ ======= 4 passed in 0.12 seconds ========
+
+If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
+
+
+ $ py.test --collect-only test_scenarios.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+ <Module 'test_scenarios.py'>
+ <Class 'TestSampleWithScenarios'>
+ <Instance '()'>
+ <Function 'test_demo1[basic]'>
+ <Function 'test_demo2[basic]'>
+ <Function 'test_demo1[advanced]'>
+ <Function 'test_demo2[advanced]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+Note that we told ``metafunc.parametrize()`` that your scenario values
+should be considered class-scoped. With pytest-2.3 this leads to a
+resource-based ordering.
+
+Deferring the setup of parametrized resources
+---------------------------------------------------
+
+.. regendoc:wipe
+
+The parametrization of test functions happens at collection
+time. It is a good idea to set up expensive resources like DB
+connections or subprocesses only when the actual test is run.
+Here is a simple example of how you can achieve that; first,
+the actual test requiring a ``db`` object::
+
+ # content of test_backends.py
+
+ import pytest
+ def test_db_initialized(db):
+ # a dummy test
+ if db.__class__.__name__ == "DB2":
+ pytest.fail("deliberately failing for demo purposes")
+
+We can now add a test configuration that generates two invocations of
+the ``test_db_initialized`` function and also implements a factory that
+creates a database object for the actual test invocations::
+
+ # content of conftest.py
+ import pytest
+
+ def pytest_generate_tests(metafunc):
+ if 'db' in metafunc.fixturenames:
+ metafunc.parametrize("db", ['d1', 'd2'], indirect=True)
+
+ class DB1:
+ "one database object"
+ class DB2:
+ "alternative database object"
+
+ @pytest.fixture
+ def db(request):
+ if request.param == "d1":
+ return DB1()
+ elif request.param == "d2":
+ return DB2()
+ else:
+ raise ValueError("invalid internal test config")
+
+Let's first see what it looks like at collection time::
+
+ $ py.test test_backends.py --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+ <Module 'test_backends.py'>
+ <Function 'test_db_initialized[d1]'>
+ <Function 'test_db_initialized[d2]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+And then when we run the test::
+
+ $ py.test -q test_backends.py
+ .F
+ ======= FAILURES ========
+ _______ test_db_initialized[d2] ________
+
+ db = <conftest.DB2 object at 0xdeadbeef>
+
+ def test_db_initialized(db):
+ # a dummy test
+ if db.__class__.__name__ == "DB2":
+ > pytest.fail("deliberately failing for demo purposes")
+ E Failed: deliberately failing for demo purposes
+
+ test_backends.py:6: Failed
+ 1 failed, 1 passed in 0.12 seconds
+
+The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function instantiated each of the DB values during the setup phase, while ``pytest_generate_tests`` generated two corresponding calls to ``test_db_initialized`` during the collection phase.
+
+.. regendoc:wipe
+
+Apply indirect on particular arguments
+---------------------------------------------------
+
+Very often parametrization uses more than one argument name. It is possible to apply the ``indirect``
+parameter to particular arguments only, by passing a list or tuple of
+argument names to ``indirect``. In the example below there is a function ``test_indirect`` which uses
+two fixtures: ``x`` and ``y``. Here we give ``indirect`` a list which contains the name of the
+fixture ``x``. The indirect parameter will be applied to this argument only, and the value ``a``
+will be passed to the respective fixture function::
+
+ # content of test_indirect_list.py
+
+ import pytest
+ @pytest.fixture(scope='function')
+ def x(request):
+ return request.param * 3
+
+ @pytest.fixture(scope='function')
+ def y(request):
+ return request.param * 2
+
+ @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
+ def test_indirect(x,y):
+ assert x == 'aaa'
+ assert y == 'b'
+
+The test passes; its collection looks like this::
+
+ $ py.test test_indirect_list.py --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 1 items
+ <Module 'test_indirect_list.py'>
+ <Function 'test_indirect[a-b]'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. regendoc:wipe
+
+Parametrizing test methods through per-class configuration
+--------------------------------------------------------------
+
+.. _`unittest parametrizer`: https://github.com/testing-cabal/unittest-ext/blob/master/params.py
+
+
+Here is an example ``pytest_generate_tests`` function implementing a
+parametrization scheme similar to Michael Foord's `unittest
+parametrizer`_ but in a lot less code::
+
+ # content of ./test_parametrize.py
+ import pytest
+
+ def pytest_generate_tests(metafunc):
+ # called once per each test function
+ funcarglist = metafunc.cls.params[metafunc.function.__name__]
+ argnames = list(funcarglist[0])
+ metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
+ for funcargs in funcarglist])
+
+ class TestClass:
+ # a map specifying multiple argument sets for a test method
+ params = {
+ 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
+ 'test_zerodivision': [dict(a=1, b=0), ],
+ }
+
+ def test_equals(self, a, b):
+ assert a == b
+
+ def test_zerodivision(self, a, b):
+ pytest.raises(ZeroDivisionError, "a/b")
+
+Our test generator looks up a class-level definition which specifies which
+argument sets to use for each test function. Let's run it::
+
+ $ py.test -q
+ F..
+ ======= FAILURES ========
+ _______ TestClass.test_equals[1-2] ________
+
+ self = <test_parametrize.TestClass object at 0xdeadbeef>, a = 1, b = 2
+
+ def test_equals(self, a, b):
+ > assert a == b
+ E assert 1 == 2
+
+ test_parametrize.py:18: AssertionError
+ 1 failed, 2 passed in 0.12 seconds
+
+Indirect parametrization with multiple fixtures
+--------------------------------------------------------------
+
+Here is a stripped down real-life example of using parametrized
+testing for testing serialization of objects between different python
+interpreters. We define a ``test_basic_objects`` function which
+is to be run with different sets of arguments for its three arguments:
+
+* ``python1``: first python interpreter, run to pickle-dump an object to a file
+* ``python2``: second interpreter, run to pickle-load an object from a file
+* ``obj``: object to be dumped/loaded
+
+.. literalinclude:: multipython.py
+
+Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (3 interpreters times 3 interpreters times 3 objects to serialize/deserialize)::
+
+ . $ py.test -rs -q multipython.py
+ ssssssssssss...ssssssssssss
+ ======= short test summary info ========
+ SKIP [12] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python3.3' not found
+ SKIP [12] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python2.6' not found
+ 3 passed, 24 skipped in 0.12 seconds
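+
+The skips come from the interpreter fixtures themselves, which are each
+parametrized over the interpreter list and arrange for ``pytest.skip``
+when their interpreter cannot be found. A heavily condensed, illustrative
+sketch of that shape (the ``PYTHONS`` list and the ``shutil.which`` lookup
+are assumptions for illustration; the bundled ``multipython.py`` differs
+in its details)::
+
+    # illustrative sketch only, not the bundled multipython.py
+    import shutil           # shutil.which requires Python 3.3+
+    import pytest
+
+    PYTHONS = ["python2.6", "python2.7", "python3.3"]
+
+    @pytest.fixture(params=PYTHONS)
+    def python1(request, tmpdir):
+        # skip the whole combination if the first interpreter is missing
+        if shutil.which(request.param) is None:
+            pytest.skip("%r not found" % request.param)
+        # hand back the interpreter name and the file a dump would go to
+        return request.param, tmpdir.join("data.pickle")
+
+    @pytest.fixture(params=PYTHONS)
+    def python2(request, python1):
+        # skip if the second interpreter is missing
+        if shutil.which(request.param) is None:
+            pytest.skip("%r not found" % request.param)
+        return request.param
+
+    @pytest.mark.parametrize("obj", [42, {}, {1: 3}])
+    def test_basic_objects(python1, python2, obj):
+        # a real test would pickle-dump ``obj`` with the first interpreter
+        # and pickle-load it back with the second, asserting equality
+        pass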
+
+Indirect parametrization of optional implementations/imports
+--------------------------------------------------------------------
+
+If you want to compare the outcomes of several implementations of a given
+API, you can write test functions that receive the already imported implementations
+and get skipped in case the implementation is not importable/available. Let's
+say we have a "base" implementation and the other (possibly optimized)
+ones need to provide similar results::
+
+ # content of conftest.py
+
+ import pytest
+
+ @pytest.fixture(scope="session")
+ def basemod(request):
+ return pytest.importorskip("base")
+
+ @pytest.fixture(scope="session", params=["opt1", "opt2"])
+ def optmod(request):
+ return pytest.importorskip(request.param)
+
+And then a base implementation of a simple function::
+
+ # content of base.py
+ def func1():
+ return 1
+
+And an optimized version::
+
+ # content of opt1.py
+ def func1():
+ return 1.0001
+
+And finally a little test module::
+
+ # content of test_module.py
+
+ def test_func1(basemod, optmod):
+ assert round(basemod.func1(), 3) == round(optmod.func1(), 3)
+
+
+If you run this with reporting for skips enabled::
+
+ $ py.test -rs test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py .s
+ ======= short test summary info ========
+ SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2'
+
+ ======= 1 passed, 1 skipped in 0.12 seconds ========
+
+You'll see that we don't have an ``opt2`` module and thus the second test run
+of our ``test_func1`` was skipped. A few notes:
+
+- the fixture functions in the ``conftest.py`` file are "session-scoped" because we
+ don't need to import more than once
+
+- if you have multiple test functions and a skipped import, you will see
+ the ``[1]`` count increasing in the report
+
+- you can put :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>` style
+  parametrization on the test functions to parametrize input/output
+  values as well, as sketched below.
+
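+For instance, a minimal sketch of such a combination (the ``value``
+argument and the scaling are purely illustrative)::
+
+    # content of test_module.py (sketch)
+
+    import pytest
+
+    @pytest.mark.parametrize("value", [0.1, 0.5, 1.0])
+    def test_func1_scaled(basemod, optmod, value):
+        # ``basemod`` and ``optmod`` come from the conftest.py fixtures
+        # above; ``value`` is parametrized directly on the test function
+        assert (round(basemod.func1() * value, 3) ==
+                round(optmod.func1() * value, 3))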
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py
new file mode 100644
index 000000000..81cd1fb11
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/conftest.py
@@ -0,0 +1,16 @@
+import sys
+import pytest
+
+py3 = sys.version_info[0] >= 3
+
+class DummyCollector(pytest.collect.File):
+ def collect(self):
+ return []
+
+def pytest_pycollect_makemodule(path, parent):
+ bn = path.basename
+    if ("py3" in bn and not py3) or ("py2" in bn and py3):
+ return DummyCollector(path, parent=parent)
+
+
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py
new file mode 100644
index 000000000..e09ed9466
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py2.py
@@ -0,0 +1,7 @@
+
+def test_exception_syntax():
+ try:
+ 0/0
+ except ZeroDivisionError, e:
+ pass
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py
new file mode 100644
index 000000000..a811f2bbc
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/py2py3/test_py3.py
@@ -0,0 +1,7 @@
+
+def test_exception_syntax():
+ try:
+ 0/0
+ except ZeroDivisionError as e:
+ pass
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py
new file mode 100644
index 000000000..05858eb85
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.py
@@ -0,0 +1,11 @@
+
+# run this with $ py.test --collect-only test_collectonly.py
+#
+def test_function():
+ pass
+
+class TestClass:
+ def test_method(self):
+ pass
+ def test_anothermethod(self):
+ pass
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst
new file mode 100644
index 000000000..5faf4c6c8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/pythoncollection.rst
@@ -0,0 +1,192 @@
+Changing standard (Python) test discovery
+===============================================
+
+Ignore paths during test collection
+-----------------------------------
+
+You can easily ignore certain test directories and modules during collection
+by passing the ``--ignore=path`` option on the cli. ``pytest`` allows multiple
+``--ignore`` options. Example::
+
+ tests/
+ ├── example
+ │   ├── test_example_01.py
+ │   ├── test_example_02.py
+ │   └── test_example_03.py
+ ├── foobar
+ │   ├── test_foobar_01.py
+ │   ├── test_foobar_02.py
+ │   └── test_foobar_03.py
+ └── hello
+ └── world
+ ├── test_world_01.py
+ ├── test_world_02.py
+ └── test_world_03.py
+
+Now if you invoke ``pytest`` with ``--ignore=tests/foobar/test_foobar_03.py --ignore=tests/hello/``,
+you will see that ``pytest`` only collects test modules that do not match the patterns specified::
+
+ ========= test session starts ==========
+ platform darwin -- Python 2.7.10, pytest-2.8.2, py-1.4.30, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 5 items
+
+ tests/example/test_example_01.py .
+ tests/example/test_example_02.py .
+ tests/example/test_example_03.py .
+ tests/foobar/test_foobar_01.py .
+ tests/foobar/test_foobar_02.py .
+
+ ======= 5 passed in 0.02 seconds =======
+
+
+Changing directory recursion
+-----------------------------------------------------
+
+You can set the :confval:`norecursedirs` option in an ini-file, for example your ``setup.cfg`` in the project root directory::
+
+ # content of setup.cfg
+ [pytest]
+ norecursedirs = .svn _build tmp*
+
+This would tell ``pytest`` to not recurse into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory.
+
+.. _`change naming conventions`:
+
+Changing naming conventions
+-----------------------------------------------------
+
+You can configure different naming conventions by setting
+the :confval:`python_files`, :confval:`python_classes` and
+:confval:`python_functions` configuration options. Example::
+
+ # content of setup.cfg
+    # can also be defined in tox.ini or pytest.ini file
+ [pytest]
+ python_files=check_*.py
+ python_classes=Check
+ python_functions=*_check
+
+This would make ``pytest`` look for tests in files that match the
+``check_*.py`` glob-pattern, classes whose names start with ``Check``, and
+functions and methods that match ``*_check``. For example, if we have::
+
+ # content of check_myapp.py
+ class CheckMyApp:
+ def simple_check(self):
+ pass
+ def complex_check(self):
+ pass
+
+then the test collection looks like this::
+
+ $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: setup.cfg
+ collected 2 items
+ <Module 'check_myapp.py'>
+ <Class 'CheckMyApp'>
+ <Instance '()'>
+ <Function 'simple_check'>
+ <Function 'complex_check'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. note::
+
+    The ``python_functions`` and ``python_classes`` options have no effect
+    for ``unittest.TestCase`` test discovery because pytest delegates
+    detection of test case methods to unittest code.
+
+Interpreting cmdline arguments as Python packages
+-----------------------------------------------------
+
+You can use the ``--pyargs`` option to make ``pytest`` try
+interpreting arguments as python package names, deriving
+their file system path and then running the test. For
+example if you have unittest2 installed you can type::
+
+ py.test --pyargs unittest2.test.test_skipping -q
+
+which would run the respective test module. Like with
+other options, through an ini-file and the :confval:`addopts` option you
+can make this change more permanently::
+
+ # content of pytest.ini
+ [pytest]
+ addopts = --pyargs
+
+Now a simple invocation of ``py.test NAME`` will check
+if NAME exists as an importable package/module and otherwise
+treat it as a filesystem path.
+
+Finding out what is collected
+-----------------------------------------------
+
+You can always peek at the collection tree without running tests like this::
+
+ . $ py.test --collect-only pythoncollection.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
+ collected 3 items
+ <Module 'CWD/pythoncollection.py'>
+ <Function 'test_function'>
+ <Class 'TestClass'>
+ <Instance '()'>
+ <Function 'test_method'>
+ <Function 'test_anothermethod'>
+
+ ======= no tests ran in 0.12 seconds ========
+
+customizing test collection to find all .py files
+---------------------------------------------------------
+
+.. regendoc:wipe
+
+You can easily instruct ``pytest`` to discover tests from every python file::
+
+
+ # content of pytest.ini
+ [pytest]
+ python_files = *.py
+
+However, many projects will have a ``setup.py`` which they don't want to be imported. Moreover, there may be files only importable by a specific python version.
+For such cases you can dynamically define files to be ignored by listing
+them in a ``conftest.py`` file::
+
+ # content of conftest.py
+ import sys
+
+ collect_ignore = ["setup.py"]
+ if sys.version_info[0] > 2:
+ collect_ignore.append("pkg/module_py2.py")
+
+And then if you have a module file like this::
+
+ # content of pkg/module_py2.py
+ def test_only_on_python2():
+ try:
+ assert 0
+ except Exception, e:
+ pass
+
+and a setup.py dummy file like this::
+
+ # content of setup.py
+ 0/0 # will raise exception if imported
+
+then a pytest run with a python2 interpreter will find the one test and will
+leave out the setup.py file::
+
+ $ py.test --collect-only
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+If you run with a Python3 interpreter, the module added to ``collect_ignore`` in the conftest.py file will not be considered for test collection.
+
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst
new file mode 100644
index 000000000..28624aa07
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/reportingdemo.rst
@@ -0,0 +1,598 @@
+
+.. _`tbreportdemo`:
+
+Demo of Python failure reports with pytest
+==================================================
+
+Here is a run of a few dozen failures
+and how ``pytest`` presents things (unfortunately
+not showing the nice colors here in the HTML that you
+get on the terminal - we are working on that):
+
+.. code-block:: python
+
+ assertion $ py.test failure_demo.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR/assertion, inifile:
+ collected 42 items
+
+ failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
+
+ ======= FAILURES ========
+ _______ test_generative[0] ________
+
+ param1 = 3, param2 = 6
+
+ def test_generative(param1, param2):
+ > assert param1 * 2 < param2
+ E assert (3 * 2) < 6
+
+ failure_demo.py:16: AssertionError
+ _______ TestFailing.test_simple ________
+
+ self = <failure_demo.TestFailing object at 0xdeadbeef>
+
+ def test_simple(self):
+ def f():
+ return 42
+ def g():
+ return 43
+
+ > assert f() == g()
+ E assert 42 == 43
+ E + where 42 = <function TestFailing.test_simple.<locals>.f at 0xdeadbeef>()
+ E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()
+
+ failure_demo.py:29: AssertionError
+ _______ TestFailing.test_simple_multiline ________
+
+ self = <failure_demo.TestFailing object at 0xdeadbeef>
+
+ def test_simple_multiline(self):
+ otherfunc_multi(
+ 42,
+ > 6*9)
+
+ failure_demo.py:34:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ a = 42, b = 54
+
+ def otherfunc_multi(a,b):
+ > assert (a ==
+ b)
+ E assert 42 == 54
+
+ failure_demo.py:12: AssertionError
+ _______ TestFailing.test_not ________
+
+ self = <failure_demo.TestFailing object at 0xdeadbeef>
+
+ def test_not(self):
+ def f():
+ return 42
+ > assert not f()
+ E assert not 42
+ E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()
+
+ failure_demo.py:39: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_text(self):
+ > assert 'spam' == 'eggs'
+ E assert 'spam' == 'eggs'
+ E - spam
+ E + eggs
+
+ failure_demo.py:43: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_similar_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_similar_text(self):
+ > assert 'foo 1 bar' == 'foo 2 bar'
+ E assert 'foo 1 bar' == 'foo 2 bar'
+ E - foo 1 bar
+ E ? ^
+ E + foo 2 bar
+ E ? ^
+
+ failure_demo.py:46: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_multiline_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_multiline_text(self):
+ > assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+ E assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+ E foo
+ E - spam
+ E + eggs
+ E bar
+
+ failure_demo.py:49: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_long_text ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_long_text(self):
+ a = '1'*100 + 'a' + '2'*100
+ b = '1'*100 + 'b' + '2'*100
+ > assert a == b
+ E assert '111111111111...2222222222222' == '1111111111111...2222222222222'
+ E Skipping 90 identical leading characters in diff, use -v to show
+ E Skipping 91 identical trailing characters in diff, use -v to show
+ E - 1111111111a222222222
+ E ? ^
+ E + 1111111111b222222222
+ E ? ^
+
+ failure_demo.py:54: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_long_text_multiline ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_long_text_multiline(self):
+ a = '1\n'*100 + 'a' + '2\n'*100
+ b = '1\n'*100 + 'b' + '2\n'*100
+ > assert a == b
+ E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
+ E Skipping 190 identical leading characters in diff, use -v to show
+ E Skipping 191 identical trailing characters in diff, use -v to show
+ E 1
+ E 1
+ E 1
+ E 1
+ E 1
+ E - a2
+ E + b2
+ E 2
+ E 2
+ E 2
+ E 2
+
+ failure_demo.py:59: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_list ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_list(self):
+ > assert [0, 1, 2] == [0, 1, 3]
+ E assert [0, 1, 2] == [0, 1, 3]
+ E At index 2 diff: 2 != 3
+ E Use -v to get the full diff
+
+ failure_demo.py:62: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_list_long ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_list_long(self):
+ a = [0]*100 + [1] + [3]*100
+ b = [0]*100 + [2] + [3]*100
+ > assert a == b
+ E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...]
+ E At index 100 diff: 1 != 2
+ E Use -v to get the full diff
+
+ failure_demo.py:67: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_dict ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_dict(self):
+ > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
+ E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
+ E Omitting 1 identical items, use -v to show
+ E Differing items:
+ E {'b': 1} != {'b': 2}
+ E Left contains more items:
+ E {'c': 0}
+ E Right contains more items:
+ E {'d': 0}
+ E Use -v to get the full diff
+
+ failure_demo.py:70: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_set ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_set(self):
+ > assert set([0, 10, 11, 12]) == set([0, 20, 21])
+ E assert set([0, 10, 11, 12]) == set([0, 20, 21])
+ E Extra items in the left set:
+ E 10
+ E 11
+ E 12
+ E Extra items in the right set:
+ E 20
+ E 21
+ E Use -v to get the full diff
+
+ failure_demo.py:73: AssertionError
+ _______ TestSpecialisedExplanations.test_eq_longer_list ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_eq_longer_list(self):
+ > assert [1,2] == [1,2,3]
+ E assert [1, 2] == [1, 2, 3]
+ E Right contains more items, first extra item: 3
+ E Use -v to get the full diff
+
+ failure_demo.py:76: AssertionError
+ _______ TestSpecialisedExplanations.test_in_list ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_in_list(self):
+ > assert 1 in [0, 2, 3, 4, 5]
+ E assert 1 in [0, 2, 3, 4, 5]
+
+ failure_demo.py:79: AssertionError
+ _______ TestSpecialisedExplanations.test_not_in_text_multiline ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_multiline(self):
+ text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
+ > assert 'foo' not in text
+ E assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail'
+ E 'foo' is contained here:
+ E some multiline
+ E text
+ E which
+ E includes foo
+ E ? +++
+ E and a
+ E tail
+
+ failure_demo.py:83: AssertionError
+ _______ TestSpecialisedExplanations.test_not_in_text_single ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_single(self):
+ text = 'single foo line'
+ > assert 'foo' not in text
+ E assert 'foo' not in 'single foo line'
+ E 'foo' is contained here:
+ E single foo line
+ E ? +++
+
+ failure_demo.py:87: AssertionError
+ _______ TestSpecialisedExplanations.test_not_in_text_single_long ________
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_single_long(self):
+ text = 'head ' * 50 + 'foo ' + 'tail ' * 20
+ > assert 'foo' not in text
+ E assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
+ E 'foo' is contained here:
+ E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
+ E ? +++
+
+ failure_demo.py:91: AssertionError
+ ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
+
+ self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
+
+ def test_not_in_text_single_long_term(self):
+ text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
+ > assert 'f'*70 not in text
+ E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
+ E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
+ E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
+ E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+ failure_demo.py:95: AssertionError
+ _______ test_attribute ________
+
+ def test_attribute():
+ class Foo(object):
+ b = 1
+ i = Foo()
+ > assert i.b == 2
+ E assert 1 == 2
+ E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b
+
+ failure_demo.py:102: AssertionError
+ _______ test_attribute_instance ________
+
+ def test_attribute_instance():
+ class Foo(object):
+ b = 1
+ > assert Foo().b == 2
+ E assert 1 == 2
+ E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef>.b
+ E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()
+
+ failure_demo.py:108: AssertionError
+ _______ test_attribute_failure ________
+
+ def test_attribute_failure():
+ class Foo(object):
+ def _get_b(self):
+ raise Exception('Failed to get attrib')
+ b = property(_get_b)
+ i = Foo()
+ > assert i.b == 2
+
+ failure_demo.py:117:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0xdeadbeef>
+
+ def _get_b(self):
+ > raise Exception('Failed to get attrib')
+ E Exception: Failed to get attrib
+
+ failure_demo.py:114: Exception
+ _______ test_attribute_multiple ________
+
+ def test_attribute_multiple():
+ class Foo(object):
+ b = 1
+ class Bar(object):
+ b = 2
+ > assert Foo().b == Bar().b
+ E assert 1 == 2
+ E + where 1 = <failure_demo.test_attribute_multiple.<locals>.Foo object at 0xdeadbeef>.b
+ E + where <failure_demo.test_attribute_multiple.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Foo'>()
+ E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef>.b
+ E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()
+
+ failure_demo.py:125: AssertionError
+ _______ TestRaises.test_raises ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_raises(self):
+ s = 'qwe'
+ > raises(TypeError, "int(s)")
+
+ failure_demo.py:134:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ > int(s)
+ E ValueError: invalid literal for int() with base 10: 'qwe'
+
+ <0-codegen $PYTHON_PREFIX/lib/python3.4/site-packages/_pytest/python.py:1302>:1: ValueError
+ _______ TestRaises.test_raises_doesnt ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_raises_doesnt(self):
+ > raises(IOError, "int('3')")
+ E Failed: DID NOT RAISE <class 'OSError'>
+
+ failure_demo.py:137: Failed
+ _______ TestRaises.test_raise ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_raise(self):
+ > raise ValueError("demo error")
+ E ValueError: demo error
+
+ failure_demo.py:140: ValueError
+ _______ TestRaises.test_tupleerror ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_tupleerror(self):
+ > a,b = [1]
+ E ValueError: need more than 1 value to unpack
+
+ failure_demo.py:143: ValueError
+ ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
+ l = [1,2,3]
+ print ("l is %r" % l)
+ > a,b = l.pop()
+ E TypeError: 'int' object is not iterable
+
+ failure_demo.py:148: TypeError
+ --------------------------- Captured stdout call ---------------------------
+ l is [1, 2, 3]
+ _______ TestRaises.test_some_error ________
+
+ self = <failure_demo.TestRaises object at 0xdeadbeef>
+
+ def test_some_error(self):
+ > if namenotexi:
+ E NameError: name 'namenotexi' is not defined
+
+ failure_demo.py:151: NameError
+ _______ test_dynamic_compile_shows_nicely ________
+
+ def test_dynamic_compile_shows_nicely():
+ src = 'def foo():\n assert 1 == 0\n'
+ name = 'abc-123'
+ module = py.std.imp.new_module(name)
+ code = _pytest._code.compile(src, name, 'exec')
+ py.builtin.exec_(code, module.__dict__)
+ py.std.sys.modules[name] = module
+ > module.foo()
+
+ failure_demo.py:166:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ def foo():
+ > assert 1 == 0
+ E assert 1 == 0
+
+ <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:163>:2: AssertionError
+ _______ TestMoreErrors.test_complex_error ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_complex_error(self):
+ def f():
+ return 44
+ def g():
+ return 43
+ > somefunc(f(), g())
+
+ failure_demo.py:176:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+ failure_demo.py:9: in somefunc
+ otherfunc(x,y)
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ a = 44, b = 43
+
+ def otherfunc(a,b):
+ > assert a==b
+ E assert 44 == 43
+
+ failure_demo.py:6: AssertionError
+ _______ TestMoreErrors.test_z1_unpack_error ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_z1_unpack_error(self):
+ l = []
+ > a,b = l
+ E ValueError: need more than 0 values to unpack
+
+ failure_demo.py:180: ValueError
+ _______ TestMoreErrors.test_z2_type_error ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_z2_type_error(self):
+ l = 3
+ > a,b = l
+ E TypeError: 'int' object is not iterable
+
+ failure_demo.py:184: TypeError
+ _______ TestMoreErrors.test_startswith ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_startswith(self):
+ s = "123"
+ g = "456"
+ > assert s.startswith(g)
+ E assert <built-in method startswith of str object at 0xdeadbeef>('456')
+ E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
+
+ failure_demo.py:189: AssertionError
+ _______ TestMoreErrors.test_startswith_nested ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_startswith_nested(self):
+ def f():
+ return "123"
+ def g():
+ return "456"
+ > assert f().startswith(g())
+ E assert <built-in method startswith of str object at 0xdeadbeef>('456')
+ E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
+ E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0xdeadbeef>()
+ E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()
+
+ failure_demo.py:196: AssertionError
+ _______ TestMoreErrors.test_global_func ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_global_func(self):
+ > assert isinstance(globf(42), float)
+ E assert isinstance(43, float)
+ E + where 43 = globf(42)
+
+ failure_demo.py:199: AssertionError
+ _______ TestMoreErrors.test_instance ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_instance(self):
+ self.x = 6*7
+ > assert self.x != 42
+ E assert 42 != 42
+ E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x
+
+ failure_demo.py:203: AssertionError
+ _______ TestMoreErrors.test_compare ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_compare(self):
+ > assert globf(10) < 5
+ E assert 11 < 5
+ E + where 11 = globf(10)
+
+ failure_demo.py:206: AssertionError
+ _______ TestMoreErrors.test_try_finally ________
+
+ self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
+
+ def test_try_finally(self):
+ x = 1
+ try:
+ > assert x == 0
+ E assert 1 == 0
+
+ failure_demo.py:211: AssertionError
+ _______ TestCustomAssertMsg.test_single_line ________
+
+ self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
+
+ def test_single_line(self):
+ class A:
+ a = 1
+ b = 2
+ > assert A.a == b, "A.a appears not to be b"
+ E AssertionError: A.a appears not to be b
+ E assert 1 == 2
+ E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a
+
+ failure_demo.py:222: AssertionError
+ _______ TestCustomAssertMsg.test_multiline ________
+
+ self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
+
+ def test_multiline(self):
+ class A:
+ a = 1
+ b = 2
+ > assert A.a == b, "A.a appears not to be b\n" \
+ "or does not appear to be b\none of those"
+ E AssertionError: A.a appears not to be b
+ E or does not appear to be b
+ E one of those
+ E assert 1 == 2
+ E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a
+
+ failure_demo.py:228: AssertionError
+ _______ TestCustomAssertMsg.test_custom_repr ________
+
+ self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
+
+ def test_custom_repr(self):
+ class JSON:
+ a = 1
+ def __repr__(self):
+ return "This is JSON\n{\n 'foo': 'bar'\n}"
+ a = JSON()
+ b = 2
+ > assert a.a == b, a
+ E AssertionError: This is JSON
+ E {
+ E 'foo': 'bar'
+ E }
+ E assert 1 == 2
+ E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a
+
+ failure_demo.py:238: AssertionError
+ ======= 42 failed in 0.12 seconds ========
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst
new file mode 100644
index 000000000..be12d2afe
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/simple.rst
@@ -0,0 +1,751 @@
+
+.. highlightlang:: python
+
+Basic patterns and examples
+==========================================================
+
+Pass different values to a test function, depending on command line options
+----------------------------------------------------------------------------
+
+.. regendoc:wipe
+
+Suppose we want to write a test that depends on a command line option.
+Here is a basic pattern to achieve this::
+
+ # content of test_sample.py
+ def test_answer(cmdopt):
+ if cmdopt == "type1":
+ print ("first")
+ elif cmdopt == "type2":
+ print ("second")
+ assert 0 # to see what was printed
+
+
+For this to work we need to add a command line option and
+provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`::
+
+ # content of conftest.py
+ import pytest
+
+ def pytest_addoption(parser):
+ parser.addoption("--cmdopt", action="store", default="type1",
+ help="my option: type1 or type2")
+
+ @pytest.fixture
+ def cmdopt(request):
+ return request.config.getoption("--cmdopt")
+
+Let's run this without supplying our new option::
+
+ $ py.test -q test_sample.py
+ F
+ ======= FAILURES ========
+ _______ test_answer ________
+
+ cmdopt = 'type1'
+
+ def test_answer(cmdopt):
+ if cmdopt == "type1":
+ print ("first")
+ elif cmdopt == "type2":
+ print ("second")
+ > assert 0 # to see what was printed
+ E assert 0
+
+ test_sample.py:6: AssertionError
+ --------------------------- Captured stdout call ---------------------------
+ first
+ 1 failed in 0.12 seconds
+
+And now with supplying a command line option::
+
+ $ py.test -q --cmdopt=type2
+ F
+ ======= FAILURES ========
+ _______ test_answer ________
+
+ cmdopt = 'type2'
+
+ def test_answer(cmdopt):
+ if cmdopt == "type1":
+ print ("first")
+ elif cmdopt == "type2":
+ print ("second")
+ > assert 0 # to see what was printed
+ E assert 0
+
+ test_sample.py:6: AssertionError
+ --------------------------- Captured stdout call ---------------------------
+ second
+ 1 failed in 0.12 seconds
+
+You can see that the command line option arrived in our test. This
+completes the basic pattern. However, one often rather wants to process
+command line options outside of the test and pass in different or
+more complex objects.
+
+Dynamically adding command line options
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Through :confval:`addopts` you can statically add command line
+options for your project. You can also dynamically modify
+the command line arguments before they get processed::
+
+ # content of conftest.py
+ import sys
+ def pytest_cmdline_preparse(args):
+ if 'xdist' in sys.modules: # pytest-xdist plugin
+ import multiprocessing
+            num = max(multiprocessing.cpu_count() // 2, 1)
+ args[:] = ["-n", str(num)] + args
+
+If you have the :ref:`xdist plugin <xdist>` installed
+you will now always perform test runs using a number
+of subprocesses close to your CPU. Running in an empty
+directory with the above conftest.py::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. _`excontrolskip`:
+
+Control skipping of tests according to command line option
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Here is a ``conftest.py`` file adding a ``--runslow`` command
+line option to control skipping of ``slow`` marked tests::
+
+ # content of conftest.py
+
+ import pytest
+ def pytest_addoption(parser):
+ parser.addoption("--runslow", action="store_true",
+ help="run slow tests")
+
+We can now write a test module like this::
+
+ # content of test_module.py
+
+ import pytest
+
+
+ slow = pytest.mark.skipif(
+ not pytest.config.getoption("--runslow"),
+ reason="need --runslow option to run"
+ )
+
+
+ def test_func_fast():
+ pass
+
+
+ @slow
+ def test_func_slow():
+ pass
+
+and when running it will see a skipped "slow" test::
+
+ $ py.test -rs # "-rs" means report details on the little 's'
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py .s
+ ======= short test summary info ========
+ SKIP [1] test_module.py:14: need --runslow option to run
+
+ ======= 1 passed, 1 skipped in 0.12 seconds ========
+
+Or run it including the ``slow`` marked test::
+
+ $ py.test --runslow
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py ..
+
+ ======= 2 passed in 0.12 seconds ========
+
+Writing well integrated assertion helpers
+--------------------------------------------------
+
+.. regendoc:wipe
+
+If you have a test helper function called from a test you can
+use ``pytest.fail`` to fail a test with a certain message.
+The test support function will not show up in the traceback if you
+set the ``__tracebackhide__`` option somewhere in the helper function.
+Example::
+
+ # content of test_checkconfig.py
+ import pytest
+ def checkconfig(x):
+ __tracebackhide__ = True
+ if not hasattr(x, "config"):
+ pytest.fail("not configured: %s" %(x,))
+
+ def test_something():
+ checkconfig(42)
+
+The ``__tracebackhide__`` setting influences how ``pytest`` shows
+tracebacks: the ``checkconfig`` function will not be shown
+unless the ``--fulltrace`` command line option is specified.
+Let's run our little function::
+
+ $ py.test -q test_checkconfig.py
+ F
+ ======= FAILURES ========
+ _______ test_something ________
+
+ def test_something():
+ > checkconfig(42)
+ E Failed: not configured: 42
+
+ test_checkconfig.py:8: Failed
+ 1 failed in 0.12 seconds
+
+Detect if running from within a pytest run
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+Usually it is a bad idea to make application code
+behave differently if called from a test. But if you
+absolutely must find out if your application code is
+running from a test you can do something like this::
+
+    # content of conftest.py
+
+    import sys
+
+    def pytest_configure(config):
+        sys._called_from_test = True
+
+    def pytest_unconfigure(config):
+        del sys._called_from_test
+
+and then check for the ``sys._called_from_test`` flag::
+
+    if hasattr(sys, '_called_from_test'):
+        # called from within a test run
+        pass
+    else:
+        # called "normally"
+        pass
+
+accordingly in your application. It's also a good idea
+to use your own application module rather than ``sys``
+for handling the flag.
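+
+For example, a minimal sketch, assuming a hypothetical ``myapp`` package of
+your own that can carry a module-level flag::
+
+    # content of conftest.py
+
+    import myapp   # hypothetical application package
+
+    def pytest_configure(config):
+        myapp._called_from_test = True
+
+    def pytest_unconfigure(config):
+        myapp._called_from_test = False
+
+Application code can then check
+``getattr(myapp, "_called_from_test", False)`` without touching ``sys``.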
+
+Adding info to test report header
+--------------------------------------------------------------
+
+.. regendoc:wipe
+
+It's easy to present extra information in a ``pytest`` run::
+
+ # content of conftest.py
+
+ def pytest_report_header(config):
+ return "project deps: mylib-1.1"
+
+which will add the string to the test header accordingly::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ project deps: mylib-1.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+.. regendoc:wipe
+
+You can also return a list of strings which will be considered as several
+lines of information. You can of course also make the amount of reporting
+information depend on e.g. the value of ``config.option.verbose`` so that
+you present more information appropriately::
+
+ # content of conftest.py
+
+ def pytest_report_header(config):
+ if config.option.verbose > 0:
+ return ["info1: did you know that ...", "did you?"]
+
+which will add info only when run with ``-v``::
+
+ $ py.test -v
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
+ cachedir: .cache
+ info1: did you know that ...
+ did you?
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collecting ... collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+and nothing when run plainly::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 0 items
+
+ ======= no tests ran in 0.12 seconds ========
+
+profiling test duration
+--------------------------
+
+.. regendoc:wipe
+
+.. versionadded: 2.2
+
+If you have a slow running large test suite you might want to find
+out which tests are the slowest. Let's make an artificial test suite::
+
+ # content of test_some_are_slow.py
+
+ import time
+
+ def test_funcfast():
+ pass
+
+ def test_funcslow1():
+ time.sleep(0.1)
+
+ def test_funcslow2():
+ time.sleep(0.2)
+
+Now we can profile which test functions execute the slowest::
+
+ $ py.test --durations=3
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 3 items
+
+ test_some_are_slow.py ...
+
+ ======= slowest 3 test durations ========
+ 0.20s call test_some_are_slow.py::test_funcslow2
+ 0.10s call test_some_are_slow.py::test_funcslow1
+ 0.00s setup test_some_are_slow.py::test_funcfast
+ ======= 3 passed in 0.12 seconds ========
+
+incremental testing - test steps
+---------------------------------------------------
+
+.. regendoc:wipe
+
+Sometimes you may have a testing situation which consists of a series
+of test steps. If one step fails it makes no sense to execute further
+steps as they are all expected to fail anyway and their tracebacks
+add no insight. Here is a simple ``conftest.py`` file which introduces
+an ``incremental`` marker which is to be used on classes::
+
+ # content of conftest.py
+
+ import pytest
+
+ def pytest_runtest_makereport(item, call):
+ if "incremental" in item.keywords:
+ if call.excinfo is not None:
+ parent = item.parent
+ parent._previousfailed = item
+
+ def pytest_runtest_setup(item):
+ if "incremental" in item.keywords:
+ previousfailed = getattr(item.parent, "_previousfailed", None)
+ if previousfailed is not None:
+ pytest.xfail("previous test failed (%s)" %previousfailed.name)
+
+These two hook implementations work together to abort incremental-marked
+tests in a class. Here is a test module example::
+
+ # content of test_step.py
+
+ import pytest
+
+ @pytest.mark.incremental
+ class TestUserHandling:
+ def test_login(self):
+ pass
+ def test_modification(self):
+ assert 0
+ def test_deletion(self):
+ pass
+
+ def test_normal():
+ pass
+
+If we run this::
+
+ $ py.test -rx
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 4 items
+
+ test_step.py .Fx.
+ ======= short test summary info ========
+ XFAIL test_step.py::TestUserHandling::()::test_deletion
+ reason: previous test failed (test_modification)
+
+ ======= FAILURES ========
+ _______ TestUserHandling.test_modification ________
+
+ self = <test_step.TestUserHandling object at 0xdeadbeef>
+
+ def test_modification(self):
+ > assert 0
+ E assert 0
+
+ test_step.py:9: AssertionError
+ ======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ========
+
+We'll see that ``test_deletion`` was not executed because ``test_modification``
+failed. It is reported as an "expected failure".
+
+
+Package/Directory-level fixtures (setups)
+-------------------------------------------------------
+
+If you have nested test directories, you can have per-directory fixture scopes
+by placing fixture functions in a ``conftest.py`` file in that directory.
+You can use all types of fixtures including :ref:`autouse fixtures
+<autouse fixtures>` which are the equivalent of xUnit's setup/teardown
+concept. It's however recommended to have explicit fixture references in your
+tests or test classes rather than relying on implicitly executing
+setup/teardown functions, especially if they are far away from the actual tests.
+
+Here is an example of making a ``db`` fixture available in a directory::
+
+ # content of a/conftest.py
+ import pytest
+
+ class DB:
+ pass
+
+ @pytest.fixture(scope="session")
+ def db():
+ return DB()
+
+and then a test module in that directory::
+
+ # content of a/test_db.py
+ def test_a1(db):
+ assert 0, db # to show value
+
+another test module::
+
+ # content of a/test_db2.py
+ def test_a2(db):
+ assert 0, db # to show value
+
+and then a module in a sister directory which will not see
+the ``db`` fixture::
+
+ # content of b/test_error.py
+ def test_root(db): # no db here, will error out
+ pass
+
+We can run this::
+
+ $ py.test
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 7 items
+
+ test_step.py .Fx.
+ a/test_db.py F
+ a/test_db2.py F
+ b/test_error.py E
+
+ ======= ERRORS ========
+ _______ ERROR at setup of test_root ________
+ file $REGENDOC_TMPDIR/b/test_error.py, line 1
+ def test_root(db): # no db here, will error out
+ fixture 'db' not found
+ available fixtures: record_xml_property, recwarn, cache, capsys, pytestconfig, tmpdir_factory, capfd, monkeypatch, tmpdir
+ use 'py.test --fixtures [testpath]' for help on them.
+
+ $REGENDOC_TMPDIR/b/test_error.py:1
+ ======= FAILURES ========
+ _______ TestUserHandling.test_modification ________
+
+ self = <test_step.TestUserHandling object at 0xdeadbeef>
+
+ def test_modification(self):
+ > assert 0
+ E assert 0
+
+ test_step.py:9: AssertionError
+ _______ test_a1 ________
+
+ db = <conftest.DB object at 0xdeadbeef>
+
+ def test_a1(db):
+ > assert 0, db # to show value
+ E AssertionError: <conftest.DB object at 0xdeadbeef>
+ E assert 0
+
+ a/test_db.py:2: AssertionError
+ _______ test_a2 ________
+
+ db = <conftest.DB object at 0xdeadbeef>
+
+ def test_a2(db):
+ > assert 0, db # to show value
+ E AssertionError: <conftest.DB object at 0xdeadbeef>
+ E assert 0
+
+ a/test_db2.py:2: AssertionError
+ ======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========
+
+The two test modules in the ``a`` directory see the same ``db`` fixture instance
+while the one test in the sister-directory ``b`` doesn't see it. We could of course
+also define a ``db`` fixture in that sister directory's ``conftest.py`` file.
+Note that each fixture is only instantiated if there is a test actually needing
+it (unless you use "autouse" fixtures, which are always executed ahead of the first test
+executing).
+
+
+post-process test reports / failures
+---------------------------------------
+
+If you want to postprocess test reports and need access to the executing
+environment you can implement a hook that gets called when the test
+"report" object is about to be created. Here we write out all failing
+test calls and also access a fixture (if it was used by the test) in
+case you want to query/look at it during your post processing. In our
+case we just write some information out to a ``failures`` file::
+
+ # content of conftest.py
+
+ import pytest
+ import os.path
+
+ @pytest.hookimpl(tryfirst=True, hookwrapper=True)
+ def pytest_runtest_makereport(item, call):
+ # execute all other hooks to obtain the report object
+ outcome = yield
+ rep = outcome.get_result()
+
+ # we only look at actual failing test calls, not setup/teardown
+ if rep.when == "call" and rep.failed:
+ mode = "a" if os.path.exists("failures") else "w"
+ with open("failures", mode) as f:
+ # let's also access a fixture for the fun of it
+ if "tmpdir" in item.fixturenames:
+ extra = " (%s)" % item.funcargs["tmpdir"]
+ else:
+ extra = ""
+
+ f.write(rep.nodeid + extra + "\n")
+
+
+if you then have failing tests::
+
+ # content of test_module.py
+ def test_fail1(tmpdir):
+ assert 0
+ def test_fail2():
+ assert 0
+
+and run them::
+
+ $ py.test test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 2 items
+
+ test_module.py FF
+
+ ======= FAILURES ========
+ _______ test_fail1 ________
+
+ tmpdir = local('PYTEST_TMPDIR/test_fail10')
+
+ def test_fail1(tmpdir):
+ > assert 0
+ E assert 0
+
+ test_module.py:2: AssertionError
+ _______ test_fail2 ________
+
+ def test_fail2():
+ > assert 0
+ E assert 0
+
+ test_module.py:4: AssertionError
+ ======= 2 failed in 0.12 seconds ========
+
+you will have a "failures" file which contains the failing test ids::
+
+ $ cat failures
+ test_module.py::test_fail1 (PYTEST_TMPDIR/test_fail10)
+ test_module.py::test_fail2
+
+Making test result information available in fixtures
+-----------------------------------------------------------
+
+.. regendoc:wipe
+
+If you want to make test result reports available in fixture finalizers
+here is a little example implemented via a local plugin::
+
+ # content of conftest.py
+
+ import pytest
+
+ @pytest.hookimpl(tryfirst=True, hookwrapper=True)
+ def pytest_runtest_makereport(item, call):
+ # execute all other hooks to obtain the report object
+ outcome = yield
+ rep = outcome.get_result()
+
+        # set a report attribute for each phase of a call, which can
+ # be "setup", "call", "teardown"
+
+ setattr(item, "rep_" + rep.when, rep)
+
+
+ @pytest.fixture
+ def something(request):
+ def fin():
+ # request.node is an "item" because we use the default
+ # "function" scope
+ if request.node.rep_setup.failed:
+ print ("setting up a test failed!", request.node.nodeid)
+ elif request.node.rep_setup.passed:
+ if request.node.rep_call.failed:
+ print ("executing test failed", request.node.nodeid)
+ request.addfinalizer(fin)
+
+
+if you then have failing tests::
+
+ # content of test_module.py
+
+ import pytest
+
+ @pytest.fixture
+ def other():
+ assert 0
+
+ def test_setup_fails(something, other):
+ pass
+
+ def test_call_fails(something):
+ assert 0
+
+ def test_fail2():
+ assert 0
+
+and run it::
+
+ $ py.test -s test_module.py
+ ======= test session starts ========
+ platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
+ rootdir: $REGENDOC_TMPDIR, inifile:
+ collected 3 items
+
+ test_module.py Esetting up a test failed! test_module.py::test_setup_fails
+ Fexecuting test failed test_module.py::test_call_fails
+ F
+
+ ======= ERRORS ========
+ _______ ERROR at setup of test_setup_fails ________
+
+ @pytest.fixture
+ def other():
+ > assert 0
+ E assert 0
+
+ test_module.py:6: AssertionError
+ ======= FAILURES ========
+ _______ test_call_fails ________
+
+ something = None
+
+ def test_call_fails(something):
+ > assert 0
+ E assert 0
+
+ test_module.py:12: AssertionError
+ _______ test_fail2 ________
+
+ def test_fail2():
+ > assert 0
+ E assert 0
+
+ test_module.py:15: AssertionError
+ ======= 2 failed, 1 error in 0.12 seconds ========
+
+You'll see that the fixture finalizers could use the precise reporting
+information.
+
+Integrating pytest runner and cx_freeze
+-----------------------------------------------------------
+
+If you freeze your application using a tool like
+`cx_freeze <http://cx-freeze.readthedocs.org>`_ in order to distribute it
+to your end-users, it is a good idea to also package your test runner and run
+your tests using the frozen application.
+
+This way packaging errors such as dependencies not being
+included in the executable can be detected early while also allowing you to
+send test files to users so they can run them on their machines, which can be
+invaluable to obtain more information about a hard to reproduce bug.
+
+Unfortunately ``cx_freeze`` can't discover pytest's own modules and plugins
+automatically because of ``pytest``'s use of dynamic module loading, so you
+must declare them explicitly by using ``pytest.freeze_includes()``::
+
+ # contents of setup.py
+ from cx_Freeze import setup, Executable
+ import pytest
+
+ setup(
+ name="app_main",
+ executables=[Executable("app_main.py")],
+ options={"build_exe":
+ {
+ 'includes': pytest.freeze_includes()}
+ },
+ # ... other options
+ )
+
+If you don't want to ship a different executable just in order to run your tests,
+you can make your program check for a certain flag and pass control
+over to ``pytest`` instead. For example::
+
+ # contents of app_main.py
+ import sys
+
+ if len(sys.argv) > 1 and sys.argv[1] == '--pytest':
+ import pytest
+ sys.exit(pytest.main(sys.argv[2:]))
+ else:
+ # normal application execution: at this point argv can be parsed
+ # by your argument-parsing library of choice as usual
+ ...
+
+This makes it convenient to execute your tests from within your frozen
+application, using standard ``py.test`` command-line options::
+
+ ./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/special.rst b/testing/web-platform/tests/tools/pytest/doc/en/example/special.rst
new file mode 100644
index 000000000..58e66d44e
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/special.rst
@@ -0,0 +1,72 @@
+A session-fixture which can look at all collected tests
+----------------------------------------------------------------
+
+A session-scoped fixture effectively has access to all
+collected test items. Here is an example of a fixture
+function which walks all collected tests, checks whether
+their test class defines a ``callme`` method, and
+calls it::
+
+ # content of conftest.py
+
+ import pytest
+
+ @pytest.fixture(scope="session", autouse=True)
+ def callattr_ahead_of_alltests(request):
+ print ("callattr_ahead_of_alltests called")
+ seen = set([None])
+ session = request.node
+ for item in session.items:
+ cls = item.getparent(pytest.Class)
+ if cls not in seen:
+ if hasattr(cls.obj, "callme"):
+ cls.obj.callme()
+ seen.add(cls)
+
+test classes may now define a ``callme`` method which
+will be called ahead of running any tests::
+
+ # content of test_module.py
+
+ class TestHello:
+ @classmethod
+ def callme(cls):
+ print ("callme called!")
+
+ def test_method1(self):
+ print ("test_method1 called")
+
+ def test_method2(self):
+ print ("test_method1 called")
+
+ class TestOther:
+ @classmethod
+ def callme(cls):
+ print ("callme other called")
+ def test_other(self):
+ print ("test other")
+
+ # works with unittest as well ...
+ import unittest
+
+ class SomeTest(unittest.TestCase):
+ @classmethod
+ def callme(self):
+ print ("SomeTest callme called")
+
+ def test_unit1(self):
+ print ("test_unit1 method called")
+
+If you run this without output capturing::
+
+ $ py.test -q -s test_module.py
+ callattr_ahead_of_alltests called
+ callme called!
+ callme other called
+ SomeTest callme called
+ test_method1 called
+ .test_method1 called
+ .test other
+ .test_unit1 method called
+ .
+ 4 passed in 0.12 seconds
diff --git a/testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py b/testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py
new file mode 100644
index 000000000..5648575e8
--- /dev/null
+++ b/testing/web-platform/tests/tools/pytest/doc/en/example/xfail_demo.py
@@ -0,0 +1,30 @@
+import pytest
+xfail = pytest.mark.xfail
+
+@xfail
+def test_hello():
+ assert 0
+
+@xfail(run=False)
+def test_hello2():
+ assert 0
+
+@xfail("hasattr(os, 'sep')")
+def test_hello3():
+ assert 0
+
+@xfail(reason="bug 110")
+def test_hello4():
+ assert 0
+
+@xfail('pytest.__version__[0] != "17"')
+def test_hello5():
+ assert 0
+
+def test_hello6():
+ pytest.xfail("reason")
+
+@xfail(raises=IndexError)
+def test_hello7():
+ x = []
+ x[1] = 1